From 31b2a680a054808022b120eab016e5ff2f304147 Mon Sep 17 00:00:00 2001
From: eopXD
Date: Fri, 13 Oct 2023 14:52:19 -0700
Subject: [PATCH] [Auto-gen] Update tests under ../auto-generated. (make
 git-commit-autogen-test)

---
 auto-generated/gnu-api-tests/vaadd.c | 352 +--
 auto-generated/gnu-api-tests/vaaddu.c | 352 +--
 auto-generated/gnu-api-tests/vadc.c | 352 +--
 auto-generated/gnu-api-tests/vadd.c | 704 +++---
 auto-generated/gnu-api-tests/vand.c | 704 +++---
 auto-generated/gnu-api-tests/vasub.c | 352 +--
 auto-generated/gnu-api-tests/vasubu.c | 352 +--
 auto-generated/gnu-api-tests/vcompress.c | 236 +-
 auto-generated/gnu-api-tests/vcpop.c | 56 +-
 auto-generated/gnu-api-tests/vdiv.c | 352 +--
 auto-generated/gnu-api-tests/vdivu.c | 352 +--
 auto-generated/gnu-api-tests/vfabs.c | 120 +-
 auto-generated/gnu-api-tests/vfadd.c | 480 ++---
 auto-generated/gnu-api-tests/vfclass.c | 120 +-
 auto-generated/gnu-api-tests/vfcvt.c | 960 ++++----
 auto-generated/gnu-api-tests/vfcvt_rtz.c | 240 +--
 auto-generated/gnu-api-tests/vfdiv.c | 480 ++---
 auto-generated/gnu-api-tests/vfirst.c | 56 +-
 auto-generated/gnu-api-tests/vfmacc.c | 240 +--
 auto-generated/gnu-api-tests/vfmadd.c | 240 +--
 auto-generated/gnu-api-tests/vfmax.c | 240 +--
 auto-generated/gnu-api-tests/vfmerge.c | 60 +-
 auto-generated/gnu-api-tests/vfmin.c | 240 +--
 auto-generated/gnu-api-tests/vfmsac.c | 240 +--
 auto-generated/gnu-api-tests/vfmsub.c | 240 +--
 auto-generated/gnu-api-tests/vfmul.c | 480 ++---
 auto-generated/gnu-api-tests/vfmv.c | 180 +-
 auto-generated/gnu-api-tests/vfncvt.c | 912 ++++----
 auto-generated/gnu-api-tests/vfncvt_rod.c | 72 +-
 auto-generated/gnu-api-tests/vfncvt_rtz.c | 240 +--
 auto-generated/gnu-api-tests/vfneg.c | 120 +-
 auto-generated/gnu-api-tests/vfnmacc.c | 240 +--
 auto-generated/gnu-api-tests/vfnmadd.c | 240 +--
 auto-generated/gnu-api-tests/vfnmsac.c | 240 +--
 auto-generated/gnu-api-tests/vfnmsub.c | 240 +--
 auto-generated/gnu-api-tests/vfrdiv.c | 240 +--
 auto-generated/gnu-api-tests/vfrec7.c | 240 +--
 auto-generated/gnu-api-tests/vfredmax.c | 120 +-
 auto-generated/gnu-api-tests/vfredmin.c | 120 +-
 auto-generated/gnu-api-tests/vfredosum.c | 240 +--
 auto-generated/gnu-api-tests/vfredusum.c | 240 +--
 auto-generated/gnu-api-tests/vfrsqrt7.c | 120 +-
 auto-generated/gnu-api-tests/vfrsub.c | 240 +--
 auto-generated/gnu-api-tests/vfsgnj.c | 240 +--
 auto-generated/gnu-api-tests/vfsgnjn.c | 240 +--
 auto-generated/gnu-api-tests/vfsgnjx.c | 240 +--
 auto-generated/gnu-api-tests/vfslide1down.c | 120 +-
 auto-generated/gnu-api-tests/vfslide1up.c | 120 +-
 auto-generated/gnu-api-tests/vfsqrt.c | 240 +--
 auto-generated/gnu-api-tests/vfsub.c | 480 ++---
 auto-generated/gnu-api-tests/vfwadd.c | 576 ++---
 auto-generated/gnu-api-tests/vfwcvt.c | 600 +++---
 auto-generated/gnu-api-tests/vfwcvt_rtz.c | 144 +-
 auto-generated/gnu-api-tests/vfwmacc.c | 144 +-
 auto-generated/gnu-api-tests/vfwmsac.c | 144 +-
 auto-generated/gnu-api-tests/vfwmul.c | 288 +--
 auto-generated/gnu-api-tests/vfwnmacc.c | 144 +-
 auto-generated/gnu-api-tests/vfwnmsac.c | 144 +-
 auto-generated/gnu-api-tests/vfwredosum.c | 176 +-
 auto-generated/gnu-api-tests/vfwredusum.c | 176 +-
 auto-generated/gnu-api-tests/vfwsub.c | 576 ++---
 auto-generated/gnu-api-tests/vid.c | 88 +-
 auto-generated/gnu-api-tests/viota.c | 176 +-
 auto-generated/gnu-api-tests/vle16.c | 144 +-
 auto-generated/gnu-api-tests/vle16ff.c | 144 +-
 auto-generated/gnu-api-tests/vle32.c | 120 +-
 auto-generated/gnu-api-tests/vle32ff.c | 120 +-
 auto-generated/gnu-api-tests/vle64.c | 96 +-
 auto-generated/gnu-api-tests/vle64ff.c | 96 +-
 auto-generated/gnu-api-tests/vle8.c | 112 +-
 auto-generated/gnu-api-tests/vle8ff.c | 112 +-
 auto-generated/gnu-api-tests/vlm.c | 28 +-
 auto-generated/gnu-api-tests/vlmul_ext_v.c | 540 ++---
 auto-generated/gnu-api-tests/vlmul_trunc_v.c | 540 ++---
 auto-generated/gnu-api-tests/vloxei16.c | 456 ++--
 auto-generated/gnu-api-tests/vloxei32.c | 416 ++--
 auto-generated/gnu-api-tests/vloxei64.c | 352 +--
 auto-generated/gnu-api-tests/vloxei8.c | 472 ++--
 auto-generated/gnu-api-tests/vloxseg2ei16.c | 384 ++--
 auto-generated/gnu-api-tests/vloxseg2ei32.c | 368 ++--
 auto-generated/gnu-api-tests/vloxseg2ei64.c | 328 +--
 auto-generated/gnu-api-tests/vloxseg2ei8.c | 384 ++--
 auto-generated/gnu-api-tests/vloxseg3ei16.c | 296 +--
 auto-generated/gnu-api-tests/vloxseg3ei32.c | 296 +--
 auto-generated/gnu-api-tests/vloxseg3ei64.c | 280 +--
 auto-generated/gnu-api-tests/vloxseg3ei8.c | 296 +--
 auto-generated/gnu-api-tests/vloxseg4ei16.c | 296 +--
 auto-generated/gnu-api-tests/vloxseg4ei32.c | 296 +--
 auto-generated/gnu-api-tests/vloxseg4ei64.c | 280 +--
 auto-generated/gnu-api-tests/vloxseg4ei8.c | 296 +--
 auto-generated/gnu-api-tests/vloxseg5ei16.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg5ei32.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg5ei64.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg5ei8.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg6ei16.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg6ei32.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg6ei64.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg6ei8.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg7ei16.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg7ei32.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg7ei64.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg7ei8.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg8ei16.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg8ei32.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg8ei64.c | 208 +-
 auto-generated/gnu-api-tests/vloxseg8ei8.c | 208 +-
 auto-generated/gnu-api-tests/vlse16.c | 144 +-
 auto-generated/gnu-api-tests/vlse32.c | 120 +-
 auto-generated/gnu-api-tests/vlse64.c | 96 +-
 auto-generated/gnu-api-tests/vlse8.c | 112 +-
 auto-generated/gnu-api-tests/vlseg2e16.c | 120 +-
 auto-generated/gnu-api-tests/vlseg2e16ff.c | 120 +-
 auto-generated/gnu-api-tests/vlseg2e32.c | 96 +-
 auto-generated/gnu-api-tests/vlseg2e32ff.c | 96 +-
 auto-generated/gnu-api-tests/vlseg2e64.c | 72 +-
 auto-generated/gnu-api-tests/vlseg2e64ff.c | 72 +-
 auto-generated/gnu-api-tests/vlseg2e8.c | 96 +-
 auto-generated/gnu-api-tests/vlseg2e8ff.c | 96 +-
 auto-generated/gnu-api-tests/vlseg3e16.c | 96 +-
 auto-generated/gnu-api-tests/vlseg3e16ff.c | 96 +-
 auto-generated/gnu-api-tests/vlseg3e32.c | 72 +-
 auto-generated/gnu-api-tests/vlseg3e32ff.c | 72 +-
 auto-generated/gnu-api-tests/vlseg3e64.c | 48 +-
 auto-generated/gnu-api-tests/vlseg3e64ff.c | 48 +-
 auto-generated/gnu-api-tests/vlseg3e8.c | 80 +-
 auto-generated/gnu-api-tests/vlseg3e8ff.c | 80 +-
 auto-generated/gnu-api-tests/vlseg4e16.c | 96 +-
 auto-generated/gnu-api-tests/vlseg4e16ff.c | 96 +-
 auto-generated/gnu-api-tests/vlseg4e32.c | 72 +-
 auto-generated/gnu-api-tests/vlseg4e32ff.c | 72 +-
 auto-generated/gnu-api-tests/vlseg4e64.c | 48 +-
 auto-generated/gnu-api-tests/vlseg4e64ff.c | 48 +-
 auto-generated/gnu-api-tests/vlseg4e8.c | 80 +-
 auto-generated/gnu-api-tests/vlseg4e8ff.c | 80 +-
 auto-generated/gnu-api-tests/vlseg5e16.c | 72 +-
 auto-generated/gnu-api-tests/vlseg5e16ff.c | 72 +-
 auto-generated/gnu-api-tests/vlseg5e32.c | 48 +-
 auto-generated/gnu-api-tests/vlseg5e32ff.c | 48 +-
 auto-generated/gnu-api-tests/vlseg5e64.c | 24 +-
 auto-generated/gnu-api-tests/vlseg5e64ff.c | 24 +-
 auto-generated/gnu-api-tests/vlseg5e8.c | 64 +-
 auto-generated/gnu-api-tests/vlseg5e8ff.c | 64 +-
 auto-generated/gnu-api-tests/vlseg6e16.c | 72 +-
 auto-generated/gnu-api-tests/vlseg6e16ff.c | 72 +-
 auto-generated/gnu-api-tests/vlseg6e32.c | 48 +-
 auto-generated/gnu-api-tests/vlseg6e32ff.c | 48 +-
 auto-generated/gnu-api-tests/vlseg6e64.c | 24 +-
 auto-generated/gnu-api-tests/vlseg6e64ff.c | 24 +-
 auto-generated/gnu-api-tests/vlseg6e8.c | 64 +-
 auto-generated/gnu-api-tests/vlseg6e8ff.c | 64 +-
 auto-generated/gnu-api-tests/vlseg7e16.c | 72 +-
 auto-generated/gnu-api-tests/vlseg7e16ff.c | 72 +-
 auto-generated/gnu-api-tests/vlseg7e32.c | 48 +-
 auto-generated/gnu-api-tests/vlseg7e32ff.c | 48 +-
 auto-generated/gnu-api-tests/vlseg7e64.c | 24 +-
 auto-generated/gnu-api-tests/vlseg7e64ff.c | 24 +-
 auto-generated/gnu-api-tests/vlseg7e8.c | 64 +-
 auto-generated/gnu-api-tests/vlseg7e8ff.c | 64 +-
 auto-generated/gnu-api-tests/vlseg8e16.c | 72 +-
 auto-generated/gnu-api-tests/vlseg8e16ff.c | 72 +-
 auto-generated/gnu-api-tests/vlseg8e32.c | 48 +-
 auto-generated/gnu-api-tests/vlseg8e32ff.c | 48 +-
 auto-generated/gnu-api-tests/vlseg8e64.c | 24 +-
 auto-generated/gnu-api-tests/vlseg8e64ff.c | 24 +-
 auto-generated/gnu-api-tests/vlseg8e8.c | 64 +-
 auto-generated/gnu-api-tests/vlseg8e8ff.c | 64 +-
 auto-generated/gnu-api-tests/vlsseg2e16.c | 120 +-
 auto-generated/gnu-api-tests/vlsseg2e32.c | 96 +-
 auto-generated/gnu-api-tests/vlsseg2e64.c | 72 +-
 auto-generated/gnu-api-tests/vlsseg2e8.c | 96 +-
 auto-generated/gnu-api-tests/vlsseg3e16.c | 96 +-
 auto-generated/gnu-api-tests/vlsseg3e32.c | 72 +-
 auto-generated/gnu-api-tests/vlsseg3e64.c | 48 +-
 auto-generated/gnu-api-tests/vlsseg3e8.c | 80 +-
 auto-generated/gnu-api-tests/vlsseg4e16.c | 96 +-
 auto-generated/gnu-api-tests/vlsseg4e32.c | 72 +-
 auto-generated/gnu-api-tests/vlsseg4e64.c | 48 +-
 auto-generated/gnu-api-tests/vlsseg4e8.c | 80 +-
 auto-generated/gnu-api-tests/vlsseg5e16.c | 72 +-
 auto-generated/gnu-api-tests/vlsseg5e32.c | 48 +-
 auto-generated/gnu-api-tests/vlsseg5e64.c | 24 +-
 auto-generated/gnu-api-tests/vlsseg5e8.c | 64 +-
 auto-generated/gnu-api-tests/vlsseg6e16.c | 72 +-
 auto-generated/gnu-api-tests/vlsseg6e32.c | 48 +-
 auto-generated/gnu-api-tests/vlsseg6e64.c | 24 +-
 auto-generated/gnu-api-tests/vlsseg6e8.c | 64 +-
 auto-generated/gnu-api-tests/vlsseg7e16.c | 72 +-
 auto-generated/gnu-api-tests/vlsseg7e32.c | 48 +-
 auto-generated/gnu-api-tests/vlsseg7e64.c | 24 +-
 auto-generated/gnu-api-tests/vlsseg7e8.c | 64 +-
 auto-generated/gnu-api-tests/vlsseg8e16.c | 72 +-
 auto-generated/gnu-api-tests/vlsseg8e32.c | 48 +-
 auto-generated/gnu-api-tests/vlsseg8e64.c | 24 +-
 auto-generated/gnu-api-tests/vlsseg8e8.c | 64 +-
 auto-generated/gnu-api-tests/vluxei16.c | 456 ++--
 auto-generated/gnu-api-tests/vluxei32.c | 416 ++--
 auto-generated/gnu-api-tests/vluxei64.c | 352 +--
 auto-generated/gnu-api-tests/vluxei8.c | 472 ++--
 auto-generated/gnu-api-tests/vluxseg2ei16.c | 384 ++--
 auto-generated/gnu-api-tests/vluxseg2ei32.c | 368 ++--
 auto-generated/gnu-api-tests/vluxseg2ei64.c | 328 +--
 auto-generated/gnu-api-tests/vluxseg2ei8.c | 384 ++--
 auto-generated/gnu-api-tests/vluxseg3ei16.c | 296 +--
 auto-generated/gnu-api-tests/vluxseg3ei32.c | 296 +--
 auto-generated/gnu-api-tests/vluxseg3ei64.c | 280 +--
 auto-generated/gnu-api-tests/vluxseg3ei8.c | 296 +--
 auto-generated/gnu-api-tests/vluxseg4ei16.c | 296 +--
 auto-generated/gnu-api-tests/vluxseg4ei32.c | 296 +--
 auto-generated/gnu-api-tests/vluxseg4ei64.c | 280 +--
 auto-generated/gnu-api-tests/vluxseg4ei8.c | 296 +--
 auto-generated/gnu-api-tests/vluxseg5ei16.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg5ei32.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg5ei64.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg5ei8.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg6ei16.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg6ei32.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg6ei64.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg6ei8.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg7ei16.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg7ei32.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg7ei64.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg7ei8.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg8ei16.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg8ei32.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg8ei64.c | 208 +-
 auto-generated/gnu-api-tests/vluxseg8ei8.c | 208 +-
 auto-generated/gnu-api-tests/vmacc.c | 352 +--
 auto-generated/gnu-api-tests/vmadc.c | 704 +++---
 auto-generated/gnu-api-tests/vmadd.c | 352 +--
 auto-generated/gnu-api-tests/vmand.c | 28 +-
 auto-generated/gnu-api-tests/vmandn.c | 28 +-
 auto-generated/gnu-api-tests/vmax.c | 352 +--
 auto-generated/gnu-api-tests/vmaxu.c | 352 +--
 auto-generated/gnu-api-tests/vmerge.c | 412 ++--
 auto-generated/gnu-api-tests/vmfeq.c | 240 +--
 auto-generated/gnu-api-tests/vmfge.c | 240 +--
 auto-generated/gnu-api-tests/vmfgt.c | 240 +--
 auto-generated/gnu-api-tests/vmfle.c | 240 +--
 auto-generated/gnu-api-tests/vmflt.c | 240 +--
 auto-generated/gnu-api-tests/vmfne.c | 240 +--
 auto-generated/gnu-api-tests/vmin.c | 352 +--
 auto-generated/gnu-api-tests/vminu.c | 352 +--
 auto-generated/gnu-api-tests/vmmv.c | 28 +-
 auto-generated/gnu-api-tests/vmnand.c | 28 +-
 auto-generated/gnu-api-tests/vmnor.c | 28 +-
 auto-generated/gnu-api-tests/vmnot.c | 28 +-
 auto-generated/gnu-api-tests/vmor.c | 28 +-
 auto-generated/gnu-api-tests/vmorn.c | 28 +-
 auto-generated/gnu-api-tests/vmsbc.c | 704 +++---
 auto-generated/gnu-api-tests/vmsbf.c | 56 +-
 auto-generated/gnu-api-tests/vmseq.c | 704 +++---
 auto-generated/gnu-api-tests/vmsge.c | 352 +--
 auto-generated/gnu-api-tests/vmsgeu.c | 352 +--
 auto-generated/gnu-api-tests/vmsgt.c | 352 +--
 auto-generated/gnu-api-tests/vmsgtu.c | 352 +--
 auto-generated/gnu-api-tests/vmsif.c | 56 +-
 auto-generated/gnu-api-tests/vmsle.c | 352 +--
 auto-generated/gnu-api-tests/vmsleu.c | 352 +--
 auto-generated/gnu-api-tests/vmslt.c | 352 +--
 auto-generated/gnu-api-tests/vmsltu.c | 352 +--
 auto-generated/gnu-api-tests/vmsne.c | 704 +++---
 auto-generated/gnu-api-tests/vmsof.c | 56 +-
 auto-generated/gnu-api-tests/vmul.c | 704 +++---
 auto-generated/gnu-api-tests/vmulh.c | 352 +--
 auto-generated/gnu-api-tests/vmulhsu.c | 352 +--
 auto-generated/gnu-api-tests/vmulhu.c | 352 +--
 auto-generated/gnu-api-tests/vmv.c | 764 +++----
 auto-generated/gnu-api-tests/vmxnor.c | 28 +-
 auto-generated/gnu-api-tests/vmxor.c | 28 +-
 auto-generated/gnu-api-tests/vnclip.c | 240 +--
 auto-generated/gnu-api-tests/vnclipu.c | 240 +--
 auto-generated/gnu-api-tests/vncvt.c | 240 +--
 auto-generated/gnu-api-tests/vneg.c | 176 +-
 auto-generated/gnu-api-tests/vnmsac.c | 352 +--
 auto-generated/gnu-api-tests/vnmsub.c | 352 +--
 auto-generated/gnu-api-tests/vnot.c | 352 +--
 auto-generated/gnu-api-tests/vnsra.c | 240 +--
 auto-generated/gnu-api-tests/vnsrl.c | 240 +--
 auto-generated/gnu-api-tests/vor.c | 704 +++---
 auto-generated/gnu-api-tests/vredand.c | 352 +--
 auto-generated/gnu-api-tests/vredmax.c | 176 +-
 auto-generated/gnu-api-tests/vredmaxu.c | 176 +-
 auto-generated/gnu-api-tests/vredmin.c | 176 +-
 auto-generated/gnu-api-tests/vredminu.c | 176 +-
 auto-generated/gnu-api-tests/vredor.c | 352 +--
 auto-generated/gnu-api-tests/vredsum.c | 352 +--
 auto-generated/gnu-api-tests/vredxor.c | 352 +--
 auto-generated/gnu-api-tests/vrem.c | 352 +--
 auto-generated/gnu-api-tests/vremu.c | 352 +--
 auto-generated/gnu-api-tests/vrgather.c | 944 ++++----
 auto-generated/gnu-api-tests/vrgatherei16.c | 456 ++--
 auto-generated/gnu-api-tests/vrsub.c | 352 +--
 auto-generated/gnu-api-tests/vsadd.c | 352 +--
 auto-generated/gnu-api-tests/vsaddu.c | 352 +--
 auto-generated/gnu-api-tests/vsbc.c | 352 +--
 auto-generated/gnu-api-tests/vse16.c | 144 +-
 auto-generated/gnu-api-tests/vse32.c | 120 +-
 auto-generated/gnu-api-tests/vse64.c | 96 +-
 auto-generated/gnu-api-tests/vse8.c | 112 +-
 auto-generated/gnu-api-tests/vset.c | 1168 +++++----
 auto-generated/gnu-api-tests/vsext_vf2.c | 120 +-
 auto-generated/gnu-api-tests/vsext_vf4.c | 72 +-
 auto-generated/gnu-api-tests/vsext_vf8.c | 32 +-
 auto-generated/gnu-api-tests/vslide1down.c | 352 +--
 auto-generated/gnu-api-tests/vslide1up.c | 352 +--
 auto-generated/gnu-api-tests/vslidedown.c | 472 ++--
 auto-generated/gnu-api-tests/vslideup.c | 472 ++--
 auto-generated/gnu-api-tests/vsll.c | 704 +++---
 auto-generated/gnu-api-tests/vsm.c | 28 +-
 auto-generated/gnu-api-tests/vsmul.c | 352 +--
 auto-generated/gnu-api-tests/vsoxei16.c | 456 ++--
 auto-generated/gnu-api-tests/vsoxei32.c | 416 ++--
 auto-generated/gnu-api-tests/vsoxei64.c | 352 +--
 auto-generated/gnu-api-tests/vsoxei8.c | 472 ++--
 auto-generated/gnu-api-tests/vsoxseg2ei16.c | 384 ++--
 auto-generated/gnu-api-tests/vsoxseg2ei32.c | 368 ++--
 auto-generated/gnu-api-tests/vsoxseg2ei64.c | 328 +--
 auto-generated/gnu-api-tests/vsoxseg2ei8.c | 384 ++--
 auto-generated/gnu-api-tests/vsoxseg3ei16.c | 296 +--
 auto-generated/gnu-api-tests/vsoxseg3ei32.c | 296 +--
 auto-generated/gnu-api-tests/vsoxseg3ei64.c | 280 +--
 auto-generated/gnu-api-tests/vsoxseg3ei8.c | 296 +--
 auto-generated/gnu-api-tests/vsoxseg4ei16.c | 296 +--
 auto-generated/gnu-api-tests/vsoxseg4ei32.c | 296 +--
 auto-generated/gnu-api-tests/vsoxseg4ei64.c | 280 +--
 auto-generated/gnu-api-tests/vsoxseg4ei8.c | 296 +--
 auto-generated/gnu-api-tests/vsoxseg5ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg5ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg5ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg5ei8.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg6ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg6ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg6ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg6ei8.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg7ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg7ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg7ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg7ei8.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg8ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg8ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg8ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsoxseg8ei8.c | 208 +-
 auto-generated/gnu-api-tests/vsra.c | 352 +--
 auto-generated/gnu-api-tests/vsrl.c | 352 +--
 auto-generated/gnu-api-tests/vsse16.c | 144 +-
 auto-generated/gnu-api-tests/vsse32.c | 120 +-
 auto-generated/gnu-api-tests/vsse64.c | 96 +-
 auto-generated/gnu-api-tests/vsse8.c | 112 +-
 auto-generated/gnu-api-tests/vsseg2e16.c | 120 +-
 auto-generated/gnu-api-tests/vsseg2e32.c | 96 +-
 auto-generated/gnu-api-tests/vsseg2e64.c | 72 +-
 auto-generated/gnu-api-tests/vsseg2e8.c | 96 +-
 auto-generated/gnu-api-tests/vsseg3e16.c | 96 +-
 auto-generated/gnu-api-tests/vsseg3e32.c | 72 +-
 auto-generated/gnu-api-tests/vsseg3e64.c | 48 +-
 auto-generated/gnu-api-tests/vsseg3e8.c | 80 +-
 auto-generated/gnu-api-tests/vsseg4e16.c | 96 +-
 auto-generated/gnu-api-tests/vsseg4e32.c | 72 +-
 auto-generated/gnu-api-tests/vsseg4e64.c | 48 +-
 auto-generated/gnu-api-tests/vsseg4e8.c | 80 +-
 auto-generated/gnu-api-tests/vsseg5e16.c | 72 +-
 auto-generated/gnu-api-tests/vsseg5e32.c | 48 +-
 auto-generated/gnu-api-tests/vsseg5e64.c | 24 +-
 auto-generated/gnu-api-tests/vsseg5e8.c | 64 +-
 auto-generated/gnu-api-tests/vsseg6e16.c | 72 +-
 auto-generated/gnu-api-tests/vsseg6e32.c | 48 +-
 auto-generated/gnu-api-tests/vsseg6e64.c | 24 +-
 auto-generated/gnu-api-tests/vsseg6e8.c | 64 +-
 auto-generated/gnu-api-tests/vsseg7e16.c | 72 +-
 auto-generated/gnu-api-tests/vsseg7e32.c | 48 +-
 auto-generated/gnu-api-tests/vsseg7e64.c | 24 +-
 auto-generated/gnu-api-tests/vsseg7e8.c | 64 +-
 auto-generated/gnu-api-tests/vsseg8e16.c | 72 +-
 auto-generated/gnu-api-tests/vsseg8e32.c | 48 +-
 auto-generated/gnu-api-tests/vsseg8e64.c | 24 +-
 auto-generated/gnu-api-tests/vsseg8e8.c | 64 +-
 auto-generated/gnu-api-tests/vssra.c | 352 +--
 auto-generated/gnu-api-tests/vssrl.c | 352 +--
 auto-generated/gnu-api-tests/vssseg2e16.c | 120 +-
 auto-generated/gnu-api-tests/vssseg2e32.c | 96 +-
 auto-generated/gnu-api-tests/vssseg2e64.c | 72 +-
 auto-generated/gnu-api-tests/vssseg2e8.c | 96 +-
 auto-generated/gnu-api-tests/vssseg3e16.c | 96 +-
 auto-generated/gnu-api-tests/vssseg3e32.c | 72 +-
 auto-generated/gnu-api-tests/vssseg3e64.c | 48 +-
 auto-generated/gnu-api-tests/vssseg3e8.c | 80 +-
 auto-generated/gnu-api-tests/vssseg4e16.c | 96 +-
 auto-generated/gnu-api-tests/vssseg4e32.c | 72 +-
 auto-generated/gnu-api-tests/vssseg4e64.c | 48 +-
 auto-generated/gnu-api-tests/vssseg4e8.c | 80 +-
 auto-generated/gnu-api-tests/vssseg5e16.c | 72 +-
 auto-generated/gnu-api-tests/vssseg5e32.c | 48 +-
 auto-generated/gnu-api-tests/vssseg5e64.c | 24 +-
 auto-generated/gnu-api-tests/vssseg5e8.c | 64 +-
 auto-generated/gnu-api-tests/vssseg6e16.c | 72 +-
 auto-generated/gnu-api-tests/vssseg6e32.c | 48 +-
 auto-generated/gnu-api-tests/vssseg6e64.c | 24 +-
 auto-generated/gnu-api-tests/vssseg6e8.c | 64 +-
 auto-generated/gnu-api-tests/vssseg7e16.c | 72 +-
 auto-generated/gnu-api-tests/vssseg7e32.c | 48 +-
 auto-generated/gnu-api-tests/vssseg7e64.c | 24 +-
 auto-generated/gnu-api-tests/vssseg7e8.c | 64 +-
 auto-generated/gnu-api-tests/vssseg8e16.c | 72 +-
 auto-generated/gnu-api-tests/vssseg8e32.c | 48 +-
 auto-generated/gnu-api-tests/vssseg8e64.c | 24 +-
 auto-generated/gnu-api-tests/vssseg8e8.c | 64 +-
 auto-generated/gnu-api-tests/vssub.c | 352 +--
 auto-generated/gnu-api-tests/vssubu.c | 352 +--
 auto-generated/gnu-api-tests/vsub.c | 704 +++---
 auto-generated/gnu-api-tests/vsuxei16.c | 456 ++--
 auto-generated/gnu-api-tests/vsuxei32.c | 416 ++--
 auto-generated/gnu-api-tests/vsuxei64.c | 352 +--
 auto-generated/gnu-api-tests/vsuxei8.c | 472 ++--
 auto-generated/gnu-api-tests/vsuxseg2ei16.c | 384 ++--
 auto-generated/gnu-api-tests/vsuxseg2ei32.c | 368 ++--
 auto-generated/gnu-api-tests/vsuxseg2ei64.c | 328 +--
 auto-generated/gnu-api-tests/vsuxseg2ei8.c | 384 ++--
 auto-generated/gnu-api-tests/vsuxseg3ei16.c | 296 +--
 auto-generated/gnu-api-tests/vsuxseg3ei32.c | 296 +--
 auto-generated/gnu-api-tests/vsuxseg3ei64.c | 280 +--
 auto-generated/gnu-api-tests/vsuxseg3ei8.c | 296 +--
 auto-generated/gnu-api-tests/vsuxseg4ei16.c | 296 +--
 auto-generated/gnu-api-tests/vsuxseg4ei32.c | 296 +--
 auto-generated/gnu-api-tests/vsuxseg4ei64.c | 280 +--
 auto-generated/gnu-api-tests/vsuxseg4ei8.c | 296 +--
 auto-generated/gnu-api-tests/vsuxseg5ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg5ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg5ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg5ei8.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg6ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg6ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg6ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg6ei8.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg7ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg7ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg7ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg7ei8.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg8ei16.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg8ei32.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg8ei64.c | 208 +-
 auto-generated/gnu-api-tests/vsuxseg8ei8.c | 208 +-
 auto-generated/gnu-api-tests/vwadd.c | 480 ++---
 auto-generated/gnu-api-tests/vwaddu.c | 480 ++---
 auto-generated/gnu-api-tests/vwcvt.c | 120 +-
 auto-generated/gnu-api-tests/vwcvtu.c | 120 +-
 auto-generated/gnu-api-tests/vwmacc.c | 120 +-
 auto-generated/gnu-api-tests/vwmaccsu.c | 120 +-
 auto-generated/gnu-api-tests/vwmaccu.c | 120 +-
 auto-generated/gnu-api-tests/vwmaccus.c | 60 +-
 auto-generated/gnu-api-tests/vwmul.c | 240 +--
 auto-generated/gnu-api-tests/vwmulsu.c | 240 +--
 auto-generated/gnu-api-tests/vwmulu.c | 240 +--
 auto-generated/gnu-api-tests/vwredsum.c | 144 +-
 auto-generated/gnu-api-tests/vwredsumu.c | 144 +-
 auto-generated/gnu-api-tests/vwsub.c | 480 ++---
 auto-generated/gnu-api-tests/vwsubu.c | 480 ++---
 auto-generated/gnu-api-tests/vxor.c | 704 +++---
 auto-generated/gnu-api-tests/vzext_vf2.c | 120 +-
 auto-generated/gnu-api-tests/vzext_vf4.c | 72 +-
 auto-generated/gnu-api-tests/vzext_vf8.c | 32 +-
 auto-generated/gnu-overloaded-tests/vaadd.c | 352 +--
 auto-generated/gnu-overloaded-tests/vaaddu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vadc.c | 352 +--
 auto-generated/gnu-overloaded-tests/vadd.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vand.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vasub.c | 352 +--
 auto-generated/gnu-overloaded-tests/vasubu.c | 352 +--
 .../gnu-overloaded-tests/vcompress.c | 236 +-
 auto-generated/gnu-overloaded-tests/vcpop.c | 56 +-
 auto-generated/gnu-overloaded-tests/vdiv.c | 352 +--
 auto-generated/gnu-overloaded-tests/vdivu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vfabs.c | 120 +-
 auto-generated/gnu-overloaded-tests/vfadd.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vfclass.c | 120 +-
 auto-generated/gnu-overloaded-tests/vfcvt.c | 960 ++++----
 .../gnu-overloaded-tests/vfcvt_rtz.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfdiv.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vfirst.c | 56 +-
 auto-generated/gnu-overloaded-tests/vfmacc.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfmadd.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfmax.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfmerge.c | 60 +-
 auto-generated/gnu-overloaded-tests/vfmin.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfmsac.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfmsub.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfmul.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vfmv.c | 60 +-
 auto-generated/gnu-overloaded-tests/vfncvt.c | 912 ++++----
 .../gnu-overloaded-tests/vfncvt_rod.c | 72 +-
 .../gnu-overloaded-tests/vfncvt_rtz.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfneg.c | 120 +-
 auto-generated/gnu-overloaded-tests/vfnmacc.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfnmadd.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfnmsac.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfnmsub.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfrdiv.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfrec7.c | 240 +--
 .../gnu-overloaded-tests/vfredmax.c | 120 +-
 .../gnu-overloaded-tests/vfredmin.c | 120 +-
 .../gnu-overloaded-tests/vfredosum.c | 240 +--
 .../gnu-overloaded-tests/vfredusum.c | 240 +--
 .../gnu-overloaded-tests/vfrsqrt7.c | 120 +-
 auto-generated/gnu-overloaded-tests/vfrsub.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfsgnj.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfsgnjn.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfsgnjx.c | 240 +--
 .../gnu-overloaded-tests/vfslide1down.c | 120 +-
 .../gnu-overloaded-tests/vfslide1up.c | 120 +-
 auto-generated/gnu-overloaded-tests/vfsqrt.c | 240 +--
 auto-generated/gnu-overloaded-tests/vfsub.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vfwadd.c | 576 ++---
 auto-generated/gnu-overloaded-tests/vfwcvt.c | 600 +++---
 .../gnu-overloaded-tests/vfwcvt_rtz.c | 144 +-
 auto-generated/gnu-overloaded-tests/vfwmacc.c | 144 +-
 auto-generated/gnu-overloaded-tests/vfwmsac.c | 144 +-
 auto-generated/gnu-overloaded-tests/vfwmul.c | 288 +--
 .../gnu-overloaded-tests/vfwnmacc.c | 144 +-
 .../gnu-overloaded-tests/vfwnmsac.c | 144 +-
 .../gnu-overloaded-tests/vfwredosum.c | 176 +-
 .../gnu-overloaded-tests/vfwredusum.c | 176 +-
 auto-generated/gnu-overloaded-tests/vfwsub.c | 576 ++---
 auto-generated/gnu-overloaded-tests/vle16.c | 72 +-
 auto-generated/gnu-overloaded-tests/vle16ff.c | 72 +-
 auto-generated/gnu-overloaded-tests/vle32.c | 60 +-
 auto-generated/gnu-overloaded-tests/vle32ff.c | 60 +-
 auto-generated/gnu-overloaded-tests/vle64.c | 48 +-
 auto-generated/gnu-overloaded-tests/vle64ff.c | 48 +-
 auto-generated/gnu-overloaded-tests/vle8.c | 56 +-
 auto-generated/gnu-overloaded-tests/vle8ff.c | 56 +-
 .../gnu-overloaded-tests/vlmul_ext_v.c | 540 ++---
 .../gnu-overloaded-tests/vlmul_trunc_v.c | 540 ++---
 .../gnu-overloaded-tests/vloxei16.c | 456 ++--
 .../gnu-overloaded-tests/vloxei32.c | 416 ++--
 .../gnu-overloaded-tests/vloxei64.c | 352 +--
 auto-generated/gnu-overloaded-tests/vloxei8.c | 472 ++--
 .../gnu-overloaded-tests/vloxseg2ei16.c | 384 ++--
 .../gnu-overloaded-tests/vloxseg2ei32.c | 368 ++--
 .../gnu-overloaded-tests/vloxseg2ei64.c | 328 +--
 .../gnu-overloaded-tests/vloxseg2ei8.c | 384 ++--
 .../gnu-overloaded-tests/vloxseg3ei16.c | 296 +--
 .../gnu-overloaded-tests/vloxseg3ei32.c | 296 +--
 .../gnu-overloaded-tests/vloxseg3ei64.c | 280 +--
 .../gnu-overloaded-tests/vloxseg3ei8.c | 296 +--
 .../gnu-overloaded-tests/vloxseg4ei16.c | 296 +--
 .../gnu-overloaded-tests/vloxseg4ei32.c | 296 +--
 .../gnu-overloaded-tests/vloxseg4ei64.c | 280 +--
 .../gnu-overloaded-tests/vloxseg4ei8.c | 296 +--
 .../gnu-overloaded-tests/vloxseg5ei16.c | 208 +-
 .../gnu-overloaded-tests/vloxseg5ei32.c | 208 +-
 .../gnu-overloaded-tests/vloxseg5ei64.c | 208 +-
 .../gnu-overloaded-tests/vloxseg5ei8.c | 208 +-
 .../gnu-overloaded-tests/vloxseg6ei16.c | 208 +-
 .../gnu-overloaded-tests/vloxseg6ei32.c | 208 +-
 .../gnu-overloaded-tests/vloxseg6ei64.c | 208 +-
 .../gnu-overloaded-tests/vloxseg6ei8.c | 208 +-
 .../gnu-overloaded-tests/vloxseg7ei16.c | 208 +-
 .../gnu-overloaded-tests/vloxseg7ei32.c | 208 +-
 .../gnu-overloaded-tests/vloxseg7ei64.c | 208 +-
 .../gnu-overloaded-tests/vloxseg7ei8.c | 208 +-
 .../gnu-overloaded-tests/vloxseg8ei16.c | 208 +-
 .../gnu-overloaded-tests/vloxseg8ei32.c | 208 +-
 .../gnu-overloaded-tests/vloxseg8ei64.c | 208 +-
 .../gnu-overloaded-tests/vloxseg8ei8.c | 208 +-
 auto-generated/gnu-overloaded-tests/vlse16.c | 72 +-
 auto-generated/gnu-overloaded-tests/vlse32.c | 60 +-
 auto-generated/gnu-overloaded-tests/vlse64.c | 48 +-
 auto-generated/gnu-overloaded-tests/vlse8.c | 56 +-
 .../gnu-overloaded-tests/vlseg2e16.c | 60 +-
 .../gnu-overloaded-tests/vlseg2e16ff.c | 60 +-
 .../gnu-overloaded-tests/vlseg2e32.c | 48 +-
 .../gnu-overloaded-tests/vlseg2e32ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg2e64.c | 36 +-
 .../gnu-overloaded-tests/vlseg2e64ff.c | 36 +-
 .../gnu-overloaded-tests/vlseg2e8.c | 48 +-
 .../gnu-overloaded-tests/vlseg2e8ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg3e16.c | 48 +-
 .../gnu-overloaded-tests/vlseg3e16ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg3e32.c | 36 +-
 .../gnu-overloaded-tests/vlseg3e32ff.c | 36 +-
 .../gnu-overloaded-tests/vlseg3e64.c | 24 +-
 .../gnu-overloaded-tests/vlseg3e64ff.c | 24 +-
 .../gnu-overloaded-tests/vlseg3e8.c | 40 +-
 .../gnu-overloaded-tests/vlseg3e8ff.c | 40 +-
 .../gnu-overloaded-tests/vlseg4e16.c | 48 +-
 .../gnu-overloaded-tests/vlseg4e16ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg4e32.c | 36 +-
 .../gnu-overloaded-tests/vlseg4e32ff.c | 36 +-
 .../gnu-overloaded-tests/vlseg4e64.c | 24 +-
 .../gnu-overloaded-tests/vlseg4e64ff.c | 24 +-
 .../gnu-overloaded-tests/vlseg4e8.c | 40 +-
 .../gnu-overloaded-tests/vlseg4e8ff.c | 40 +-
 .../gnu-overloaded-tests/vlseg5e16.c | 36 +-
 .../gnu-overloaded-tests/vlseg5e16ff.c | 36 +-
 .../gnu-overloaded-tests/vlseg5e32.c | 24 +-
 .../gnu-overloaded-tests/vlseg5e32ff.c | 24 +-
 .../gnu-overloaded-tests/vlseg5e64.c | 12 +-
 .../gnu-overloaded-tests/vlseg5e64ff.c | 12 +-
 .../gnu-overloaded-tests/vlseg5e8.c | 32 +-
 .../gnu-overloaded-tests/vlseg5e8ff.c | 32 +-
 .../gnu-overloaded-tests/vlseg6e16.c | 36 +-
 .../gnu-overloaded-tests/vlseg6e16ff.c | 36 +-
 .../gnu-overloaded-tests/vlseg6e32.c | 24 +-
 .../gnu-overloaded-tests/vlseg6e32ff.c | 24 +-
 .../gnu-overloaded-tests/vlseg6e64.c | 12 +-
 .../gnu-overloaded-tests/vlseg6e64ff.c | 12 +-
 .../gnu-overloaded-tests/vlseg6e8.c | 32 +-
 .../gnu-overloaded-tests/vlseg6e8ff.c | 32 +-
 .../gnu-overloaded-tests/vlseg7e16.c | 36 +-
 .../gnu-overloaded-tests/vlseg7e16ff.c | 36 +-
 .../gnu-overloaded-tests/vlseg7e32.c | 24 +-
 .../gnu-overloaded-tests/vlseg7e32ff.c | 24 +-
 .../gnu-overloaded-tests/vlseg7e64.c | 12 +-
 .../gnu-overloaded-tests/vlseg7e64ff.c | 12 +-
 .../gnu-overloaded-tests/vlseg7e8.c | 32 +-
 .../gnu-overloaded-tests/vlseg7e8ff.c | 32 +-
 .../gnu-overloaded-tests/vlseg8e16.c | 36 +-
 .../gnu-overloaded-tests/vlseg8e16ff.c | 36 +-
 .../gnu-overloaded-tests/vlseg8e32.c | 24 +-
 .../gnu-overloaded-tests/vlseg8e32ff.c | 24 +-
 .../gnu-overloaded-tests/vlseg8e64.c | 12 +-
 .../gnu-overloaded-tests/vlseg8e64ff.c | 12 +-
 .../gnu-overloaded-tests/vlseg8e8.c | 32 +-
 .../gnu-overloaded-tests/vlseg8e8ff.c | 32 +-
 .../gnu-overloaded-tests/vlsseg2e16.c | 60 +-
 .../gnu-overloaded-tests/vlsseg2e32.c | 48 +-
 .../gnu-overloaded-tests/vlsseg2e64.c | 36 +-
 .../gnu-overloaded-tests/vlsseg2e8.c | 48 +-
 .../gnu-overloaded-tests/vlsseg3e16.c | 48 +-
 .../gnu-overloaded-tests/vlsseg3e32.c | 36 +-
 .../gnu-overloaded-tests/vlsseg3e64.c | 24 +-
 .../gnu-overloaded-tests/vlsseg3e8.c | 40 +-
 .../gnu-overloaded-tests/vlsseg4e16.c | 48 +-
 .../gnu-overloaded-tests/vlsseg4e32.c | 36 +-
 .../gnu-overloaded-tests/vlsseg4e64.c | 24 +-
 .../gnu-overloaded-tests/vlsseg4e8.c | 40 +-
 .../gnu-overloaded-tests/vlsseg5e16.c | 36 +-
 .../gnu-overloaded-tests/vlsseg5e32.c | 24 +-
 .../gnu-overloaded-tests/vlsseg5e64.c | 12 +-
 .../gnu-overloaded-tests/vlsseg5e8.c | 32 +-
 .../gnu-overloaded-tests/vlsseg6e16.c | 36 +-
 .../gnu-overloaded-tests/vlsseg6e32.c | 24 +-
 .../gnu-overloaded-tests/vlsseg6e64.c | 12 +-
 .../gnu-overloaded-tests/vlsseg6e8.c | 32 +-
 .../gnu-overloaded-tests/vlsseg7e16.c | 36 +-
 .../gnu-overloaded-tests/vlsseg7e32.c | 24 +-
 .../gnu-overloaded-tests/vlsseg7e64.c | 12 +-
 .../gnu-overloaded-tests/vlsseg7e8.c | 32 +-
 .../gnu-overloaded-tests/vlsseg8e16.c | 36 +-
 .../gnu-overloaded-tests/vlsseg8e32.c | 24 +-
 .../gnu-overloaded-tests/vlsseg8e64.c | 12 +-
 .../gnu-overloaded-tests/vlsseg8e8.c | 32 +-
 .../gnu-overloaded-tests/vluxei16.c | 456 ++--
 .../gnu-overloaded-tests/vluxei32.c | 416 ++--
 .../gnu-overloaded-tests/vluxei64.c | 352 +--
 auto-generated/gnu-overloaded-tests/vluxei8.c | 472 ++--
 .../gnu-overloaded-tests/vluxseg2ei16.c | 384 ++--
 .../gnu-overloaded-tests/vluxseg2ei32.c | 368 ++--
 .../gnu-overloaded-tests/vluxseg2ei64.c | 328 +--
 .../gnu-overloaded-tests/vluxseg2ei8.c | 384 ++--
 .../gnu-overloaded-tests/vluxseg3ei16.c | 296 +--
 .../gnu-overloaded-tests/vluxseg3ei32.c | 296 +--
 .../gnu-overloaded-tests/vluxseg3ei64.c | 280 +--
 .../gnu-overloaded-tests/vluxseg3ei8.c | 296 +--
 .../gnu-overloaded-tests/vluxseg4ei16.c | 296 +--
 .../gnu-overloaded-tests/vluxseg4ei32.c | 296 +--
 .../gnu-overloaded-tests/vluxseg4ei64.c | 280 +--
 .../gnu-overloaded-tests/vluxseg4ei8.c | 296 +--
 .../gnu-overloaded-tests/vluxseg5ei16.c | 208 +-
 .../gnu-overloaded-tests/vluxseg5ei32.c | 208 +-
 .../gnu-overloaded-tests/vluxseg5ei64.c | 208 +-
 .../gnu-overloaded-tests/vluxseg5ei8.c | 208 +-
 .../gnu-overloaded-tests/vluxseg6ei16.c | 208 +-
 .../gnu-overloaded-tests/vluxseg6ei32.c | 208 +-
 .../gnu-overloaded-tests/vluxseg6ei64.c | 208 +-
 .../gnu-overloaded-tests/vluxseg6ei8.c | 208 +-
 .../gnu-overloaded-tests/vluxseg7ei16.c | 208 +-
 .../gnu-overloaded-tests/vluxseg7ei32.c | 208 +-
 .../gnu-overloaded-tests/vluxseg7ei64.c | 208 +-
 .../gnu-overloaded-tests/vluxseg7ei8.c | 208 +-
 .../gnu-overloaded-tests/vluxseg8ei16.c | 208 +-
 .../gnu-overloaded-tests/vluxseg8ei32.c | 208 +-
 .../gnu-overloaded-tests/vluxseg8ei64.c | 208 +-
 .../gnu-overloaded-tests/vluxseg8ei8.c | 208 +-
 auto-generated/gnu-overloaded-tests/vmacc.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmadc.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vmadd.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmand.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmandn.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmax.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmaxu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmerge.c | 412 ++--
 auto-generated/gnu-overloaded-tests/vmfeq.c | 240 +--
 auto-generated/gnu-overloaded-tests/vmfge.c | 240 +--
 auto-generated/gnu-overloaded-tests/vmfgt.c | 240 +--
 auto-generated/gnu-overloaded-tests/vmfle.c | 240 +--
 auto-generated/gnu-overloaded-tests/vmflt.c | 240 +--
 auto-generated/gnu-overloaded-tests/vmfne.c | 240 +--
 auto-generated/gnu-overloaded-tests/vmin.c | 352 +--
 auto-generated/gnu-overloaded-tests/vminu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmmv.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmnand.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmnor.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmnot.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmor.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmorn.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmsbc.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vmsbf.c | 56 +-
 auto-generated/gnu-overloaded-tests/vmseq.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vmsge.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmsgeu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmsgt.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmsgtu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmsif.c | 56 +-
 auto-generated/gnu-overloaded-tests/vmsle.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmsleu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmslt.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmsltu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmsne.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vmsof.c | 56 +-
 auto-generated/gnu-overloaded-tests/vmul.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vmulh.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmulhsu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmulhu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vmv.c | 412 ++--
 auto-generated/gnu-overloaded-tests/vmxnor.c | 28 +-
 auto-generated/gnu-overloaded-tests/vmxor.c | 28 +-
 auto-generated/gnu-overloaded-tests/vnclip.c | 240 +--
 auto-generated/gnu-overloaded-tests/vnclipu.c | 240 +--
 auto-generated/gnu-overloaded-tests/vncvt.c | 240 +--
 auto-generated/gnu-overloaded-tests/vneg.c | 176 +-
 auto-generated/gnu-overloaded-tests/vnmsac.c | 352 +--
 auto-generated/gnu-overloaded-tests/vnmsub.c | 352 +--
 auto-generated/gnu-overloaded-tests/vnot.c | 352 +--
 auto-generated/gnu-overloaded-tests/vnsra.c | 240 +--
 auto-generated/gnu-overloaded-tests/vnsrl.c | 240 +--
 auto-generated/gnu-overloaded-tests/vor.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vredand.c | 352 +--
 auto-generated/gnu-overloaded-tests/vredmax.c | 176 +-
 .../gnu-overloaded-tests/vredmaxu.c | 176 +-
 auto-generated/gnu-overloaded-tests/vredmin.c | 176 +-
 .../gnu-overloaded-tests/vredminu.c | 176 +-
 auto-generated/gnu-overloaded-tests/vredor.c | 352 +--
 auto-generated/gnu-overloaded-tests/vredsum.c | 352 +--
 auto-generated/gnu-overloaded-tests/vredxor.c | 352 +--
 auto-generated/gnu-overloaded-tests/vrem.c | 352 +--
 auto-generated/gnu-overloaded-tests/vremu.c | 352 +--
 .../gnu-overloaded-tests/vrgather.c | 944 ++++----
 .../gnu-overloaded-tests/vrgatherei16.c | 456 ++--
 auto-generated/gnu-overloaded-tests/vrsub.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsadd.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsaddu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsbc.c | 352 +--
 auto-generated/gnu-overloaded-tests/vse16.c | 144 +-
 auto-generated/gnu-overloaded-tests/vse32.c | 120 +-
 auto-generated/gnu-overloaded-tests/vse64.c | 96 +-
 auto-generated/gnu-overloaded-tests/vse8.c | 112 +-
 auto-generated/gnu-overloaded-tests/vset.c | 1168 +++++----
 .../gnu-overloaded-tests/vsext_vf2.c | 120 +-
 .../gnu-overloaded-tests/vsext_vf4.c | 72 +-
 .../gnu-overloaded-tests/vsext_vf8.c | 32 +-
 .../gnu-overloaded-tests/vslide1down.c | 352 +--
 .../gnu-overloaded-tests/vslide1up.c | 352 +--
 .../gnu-overloaded-tests/vslidedown.c | 472 ++--
 .../gnu-overloaded-tests/vslideup.c | 472 ++--
 auto-generated/gnu-overloaded-tests/vsll.c | 704 +++---
 auto-generated/gnu-overloaded-tests/vsm.c | 28 +-
 auto-generated/gnu-overloaded-tests/vsmul.c | 352 +--
 .../gnu-overloaded-tests/vsoxei16.c | 456 ++--
 .../gnu-overloaded-tests/vsoxei32.c | 416 ++--
 .../gnu-overloaded-tests/vsoxei64.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsoxei8.c | 472 ++--
 .../gnu-overloaded-tests/vsoxseg2ei16.c | 384 ++--
 .../gnu-overloaded-tests/vsoxseg2ei32.c | 368 ++--
 .../gnu-overloaded-tests/vsoxseg2ei64.c | 328 +--
 .../gnu-overloaded-tests/vsoxseg2ei8.c | 384 ++--
 .../gnu-overloaded-tests/vsoxseg3ei16.c | 296 +--
 .../gnu-overloaded-tests/vsoxseg3ei32.c | 296 +--
 .../gnu-overloaded-tests/vsoxseg3ei64.c | 280 +--
 .../gnu-overloaded-tests/vsoxseg3ei8.c | 296 +--
 .../gnu-overloaded-tests/vsoxseg4ei16.c | 296 +--
 .../gnu-overloaded-tests/vsoxseg4ei32.c | 296 +--
 .../gnu-overloaded-tests/vsoxseg4ei64.c | 280 +--
 .../gnu-overloaded-tests/vsoxseg4ei8.c | 296 +--
 .../gnu-overloaded-tests/vsoxseg5ei16.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg5ei32.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg5ei64.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg5ei8.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg6ei16.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg6ei32.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg6ei64.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg6ei8.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg7ei16.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg7ei32.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg7ei64.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg7ei8.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg8ei16.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg8ei32.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg8ei64.c | 208 +-
 .../gnu-overloaded-tests/vsoxseg8ei8.c | 208 +-
 auto-generated/gnu-overloaded-tests/vsra.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsrl.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsse16.c | 144 +-
 auto-generated/gnu-overloaded-tests/vsse32.c | 120 +-
 auto-generated/gnu-overloaded-tests/vsse64.c | 96 +-
 auto-generated/gnu-overloaded-tests/vsse8.c | 112 +-
 .../gnu-overloaded-tests/vsseg2e16.c | 120 +-
 .../gnu-overloaded-tests/vsseg2e32.c | 96 +-
 .../gnu-overloaded-tests/vsseg2e64.c | 72 +-
 .../gnu-overloaded-tests/vsseg2e8.c | 96 +-
 .../gnu-overloaded-tests/vsseg3e16.c | 96 +-
 .../gnu-overloaded-tests/vsseg3e32.c | 72 +-
 .../gnu-overloaded-tests/vsseg3e64.c | 48 +-
 .../gnu-overloaded-tests/vsseg3e8.c | 80 +-
 .../gnu-overloaded-tests/vsseg4e16.c | 96 +-
 .../gnu-overloaded-tests/vsseg4e32.c | 72 +-
 .../gnu-overloaded-tests/vsseg4e64.c | 48 +-
 .../gnu-overloaded-tests/vsseg4e8.c | 80 +-
 .../gnu-overloaded-tests/vsseg5e16.c | 72 +-
 .../gnu-overloaded-tests/vsseg5e32.c | 48 +-
 .../gnu-overloaded-tests/vsseg5e64.c | 24 +-
 .../gnu-overloaded-tests/vsseg5e8.c | 64 +-
 .../gnu-overloaded-tests/vsseg6e16.c | 72 +-
 .../gnu-overloaded-tests/vsseg6e32.c | 48 +-
 .../gnu-overloaded-tests/vsseg6e64.c | 24 +-
 .../gnu-overloaded-tests/vsseg6e8.c | 64 +-
 .../gnu-overloaded-tests/vsseg7e16.c | 72 +-
 .../gnu-overloaded-tests/vsseg7e32.c | 48 +-
 .../gnu-overloaded-tests/vsseg7e64.c | 24 +-
 .../gnu-overloaded-tests/vsseg7e8.c | 64 +-
 .../gnu-overloaded-tests/vsseg8e16.c | 72 +-
 .../gnu-overloaded-tests/vsseg8e32.c | 48 +-
 .../gnu-overloaded-tests/vsseg8e64.c | 24 +-
 .../gnu-overloaded-tests/vsseg8e8.c | 64 +-
 auto-generated/gnu-overloaded-tests/vssra.c | 352 +--
 auto-generated/gnu-overloaded-tests/vssrl.c | 352 +--
 .../gnu-overloaded-tests/vssseg2e16.c | 120 +-
 .../gnu-overloaded-tests/vssseg2e32.c | 96 +-
 .../gnu-overloaded-tests/vssseg2e64.c | 72 +-
 .../gnu-overloaded-tests/vssseg2e8.c | 96 +-
 .../gnu-overloaded-tests/vssseg3e16.c | 96 +-
 .../gnu-overloaded-tests/vssseg3e32.c | 72 +-
 .../gnu-overloaded-tests/vssseg3e64.c | 48 +-
 .../gnu-overloaded-tests/vssseg3e8.c | 80 +-
 .../gnu-overloaded-tests/vssseg4e16.c | 96 +-
 .../gnu-overloaded-tests/vssseg4e32.c | 72 +-
 .../gnu-overloaded-tests/vssseg4e64.c | 48 +-
 .../gnu-overloaded-tests/vssseg4e8.c | 80 +-
 .../gnu-overloaded-tests/vssseg5e16.c | 72 +-
 .../gnu-overloaded-tests/vssseg5e32.c | 48 +-
 .../gnu-overloaded-tests/vssseg5e64.c | 24 +-
 .../gnu-overloaded-tests/vssseg5e8.c | 64 +-
 .../gnu-overloaded-tests/vssseg6e16.c | 72 +-
 .../gnu-overloaded-tests/vssseg6e32.c | 48 +-
 .../gnu-overloaded-tests/vssseg6e64.c | 24 +-
 .../gnu-overloaded-tests/vssseg6e8.c | 64 +-
 .../gnu-overloaded-tests/vssseg7e16.c | 72 +-
 .../gnu-overloaded-tests/vssseg7e32.c | 48 +-
 .../gnu-overloaded-tests/vssseg7e64.c | 24 +-
 .../gnu-overloaded-tests/vssseg7e8.c | 64 +-
 .../gnu-overloaded-tests/vssseg8e16.c | 72 +-
 .../gnu-overloaded-tests/vssseg8e32.c | 48 +-
 .../gnu-overloaded-tests/vssseg8e64.c | 24 +-
 .../gnu-overloaded-tests/vssseg8e8.c | 64 +-
 auto-generated/gnu-overloaded-tests/vssub.c | 352 +--
 auto-generated/gnu-overloaded-tests/vssubu.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsub.c | 704 +++---
 .../gnu-overloaded-tests/vsuxei16.c | 456 ++--
 .../gnu-overloaded-tests/vsuxei32.c | 416 ++--
 .../gnu-overloaded-tests/vsuxei64.c | 352 +--
 auto-generated/gnu-overloaded-tests/vsuxei8.c | 472 ++--
 .../gnu-overloaded-tests/vsuxseg2ei16.c | 384 ++--
 .../gnu-overloaded-tests/vsuxseg2ei32.c | 368 ++--
 .../gnu-overloaded-tests/vsuxseg2ei64.c | 328 +--
 .../gnu-overloaded-tests/vsuxseg2ei8.c | 384 ++--
 .../gnu-overloaded-tests/vsuxseg3ei16.c | 296 +--
 .../gnu-overloaded-tests/vsuxseg3ei32.c | 296 +--
 .../gnu-overloaded-tests/vsuxseg3ei64.c | 280 +--
 .../gnu-overloaded-tests/vsuxseg3ei8.c | 296 +--
 .../gnu-overloaded-tests/vsuxseg4ei16.c | 296 +--
 .../gnu-overloaded-tests/vsuxseg4ei32.c | 296 +--
 .../gnu-overloaded-tests/vsuxseg4ei64.c | 280 +--
 .../gnu-overloaded-tests/vsuxseg4ei8.c | 296 +--
 .../gnu-overloaded-tests/vsuxseg5ei16.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg5ei32.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg5ei64.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg5ei8.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg6ei16.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg6ei32.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg6ei64.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg6ei8.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg7ei16.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg7ei32.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg7ei64.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg7ei8.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg8ei16.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg8ei32.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg8ei64.c | 208 +-
 .../gnu-overloaded-tests/vsuxseg8ei8.c | 208 +-
 auto-generated/gnu-overloaded-tests/vwadd.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vwaddu.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vwcvt.c | 120 +-
 auto-generated/gnu-overloaded-tests/vwcvtu.c | 120 +-
 auto-generated/gnu-overloaded-tests/vwmacc.c | 120 +-
 .../gnu-overloaded-tests/vwmaccsu.c | 120 +-
 auto-generated/gnu-overloaded-tests/vwmaccu.c | 120 +-
 .../gnu-overloaded-tests/vwmaccus.c | 60 +-
 auto-generated/gnu-overloaded-tests/vwmul.c | 240 +--
 auto-generated/gnu-overloaded-tests/vwmulsu.c | 240 +--
 auto-generated/gnu-overloaded-tests/vwmulu.c | 240 +--
 .../gnu-overloaded-tests/vwredsum.c | 144 +-
 .../gnu-overloaded-tests/vwredsumu.c | 144 +-
 auto-generated/gnu-overloaded-tests/vwsub.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vwsubu.c | 480 ++---
 auto-generated/gnu-overloaded-tests/vxor.c | 704 +++---
 .../gnu-overloaded-tests/vzext_vf2.c | 120 +-
 .../gnu-overloaded-tests/vzext_vf4.c | 72 +-
 .../gnu-overloaded-tests/vzext_vf8.c | 32 +-
 .../policy_funcs/gnu-api-tests/vaadd.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vaaddu.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vadc.c | 352 +--
 .../policy_funcs/gnu-api-tests/vadd.c | 1408 ++++++------
 .../policy_funcs/gnu-api-tests/vand.c | 1408 ++++++------
 .../policy_funcs/gnu-api-tests/vasub.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vasubu.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vcompress.c | 236 +-
 .../policy_funcs/gnu-api-tests/vdiv.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vdivu.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vfabs.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfadd.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vfclass.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfcvt.c | 1920 ++++++++---------
 .../policy_funcs/gnu-api-tests/vfcvt_rtz.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfdiv.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vfmacc.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfmadd.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfmax.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfmerge.c | 60 +-
 .../policy_funcs/gnu-api-tests/vfmin.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfmsac.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfmsub.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfmul.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vfmv.c | 120 +-
 .../policy_funcs/gnu-api-tests/vfncvt.c | 1824 ++++++++--------
 .../policy_funcs/gnu-api-tests/vfncvt_rod.c | 144 +-
 .../policy_funcs/gnu-api-tests/vfncvt_rtz.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfneg.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfnmacc.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfnmadd.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfnmsac.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfnmsub.c | 720 +++----
 .../policy_funcs/gnu-api-tests/vfrdiv.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfrec7.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfredmax.c | 120 +-
 .../policy_funcs/gnu-api-tests/vfredmin.c | 120 +-
 .../policy_funcs/gnu-api-tests/vfredosum.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfredusum.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfrsqrt7.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfrsub.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfsgnj.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfsgnjn.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfsgnjx.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfslide1down.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfslide1up.c | 240 +--
 .../policy_funcs/gnu-api-tests/vfsqrt.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vfsub.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vfwadd.c | 1152 +++++-----
 .../policy_funcs/gnu-api-tests/vfwcvt.c | 1200 +++++------
 .../policy_funcs/gnu-api-tests/vfwcvt_rtz.c | 288 +--
 .../policy_funcs/gnu-api-tests/vfwmacc.c | 432 ++--
 .../policy_funcs/gnu-api-tests/vfwmsac.c | 432 ++--
 .../policy_funcs/gnu-api-tests/vfwmul.c | 576 ++---
 .../policy_funcs/gnu-api-tests/vfwnmacc.c | 432 ++--
 .../policy_funcs/gnu-api-tests/vfwnmsac.c | 432 ++--
 .../policy_funcs/gnu-api-tests/vfwredosum.c | 176 +-
 .../policy_funcs/gnu-api-tests/vfwredusum.c | 176 +-
 .../policy_funcs/gnu-api-tests/vfwsub.c | 1152 +++++-----
 .../policy_funcs/gnu-api-tests/vid.c | 352 +--
 .../policy_funcs/gnu-api-tests/viota.c | 352 +--
 .../policy_funcs/gnu-api-tests/vle16.c | 288 +--
 .../policy_funcs/gnu-api-tests/vle16ff.c | 288 +--
 .../policy_funcs/gnu-api-tests/vle32.c | 240 +--
 .../policy_funcs/gnu-api-tests/vle32ff.c | 240 +--
 .../policy_funcs/gnu-api-tests/vle64.c | 192 +-
 .../policy_funcs/gnu-api-tests/vle64ff.c | 192 +-
 .../policy_funcs/gnu-api-tests/vle8.c | 224 +-
 .../policy_funcs/gnu-api-tests/vle8ff.c | 224 +-
.../policy_funcs/gnu-api-tests/vloxei16.c | 912 ++++---- .../policy_funcs/gnu-api-tests/vloxei32.c | 832 +++---- .../policy_funcs/gnu-api-tests/vloxei64.c | 704 +++--- .../policy_funcs/gnu-api-tests/vloxei8.c | 944 ++++---- .../policy_funcs/gnu-api-tests/vloxseg2ei16.c | 768 +++---- .../policy_funcs/gnu-api-tests/vloxseg2ei32.c | 736 +++---- .../policy_funcs/gnu-api-tests/vloxseg2ei64.c | 656 +++--- .../policy_funcs/gnu-api-tests/vloxseg2ei8.c | 768 +++---- .../policy_funcs/gnu-api-tests/vloxseg3ei16.c | 592 ++--- .../policy_funcs/gnu-api-tests/vloxseg3ei32.c | 592 ++--- .../policy_funcs/gnu-api-tests/vloxseg3ei64.c | 560 ++--- .../policy_funcs/gnu-api-tests/vloxseg3ei8.c | 592 ++--- .../policy_funcs/gnu-api-tests/vloxseg4ei16.c | 592 ++--- .../policy_funcs/gnu-api-tests/vloxseg4ei32.c | 592 ++--- .../policy_funcs/gnu-api-tests/vloxseg4ei64.c | 560 ++--- .../policy_funcs/gnu-api-tests/vloxseg4ei8.c | 592 ++--- .../policy_funcs/gnu-api-tests/vloxseg5ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg5ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg5ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg5ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg6ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg6ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg6ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg6ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg7ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg7ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg7ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg7ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg8ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg8ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg8ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vloxseg8ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vlse16.c | 288 +-- .../policy_funcs/gnu-api-tests/vlse32.c | 240 +-- .../policy_funcs/gnu-api-tests/vlse64.c | 192 +- .../policy_funcs/gnu-api-tests/vlse8.c | 224 +- .../policy_funcs/gnu-api-tests/vlseg2e16.c | 240 +-- .../policy_funcs/gnu-api-tests/vlseg2e16ff.c | 240 +-- .../policy_funcs/gnu-api-tests/vlseg2e32.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg2e32ff.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg2e64.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg2e64ff.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg2e8.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg2e8ff.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg3e16.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg3e16ff.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg3e32.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg3e32ff.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg3e64.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg3e64ff.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg3e8.c | 160 +- .../policy_funcs/gnu-api-tests/vlseg3e8ff.c | 160 +- .../policy_funcs/gnu-api-tests/vlseg4e16.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg4e16ff.c | 192 +- .../policy_funcs/gnu-api-tests/vlseg4e32.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg4e32ff.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg4e64.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg4e64ff.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg4e8.c | 160 +- .../policy_funcs/gnu-api-tests/vlseg4e8ff.c | 160 +- .../policy_funcs/gnu-api-tests/vlseg5e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg5e16ff.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg5e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg5e32ff.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg5e64.c | 48 +- 
.../policy_funcs/gnu-api-tests/vlseg5e64ff.c | 48 +- .../policy_funcs/gnu-api-tests/vlseg5e8.c | 128 +- .../policy_funcs/gnu-api-tests/vlseg5e8ff.c | 128 +- .../policy_funcs/gnu-api-tests/vlseg6e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg6e16ff.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg6e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg6e32ff.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg6e64.c | 48 +- .../policy_funcs/gnu-api-tests/vlseg6e64ff.c | 48 +- .../policy_funcs/gnu-api-tests/vlseg6e8.c | 128 +- .../policy_funcs/gnu-api-tests/vlseg6e8ff.c | 128 +- .../policy_funcs/gnu-api-tests/vlseg7e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg7e16ff.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg7e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg7e32ff.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg7e64.c | 48 +- .../policy_funcs/gnu-api-tests/vlseg7e64ff.c | 48 +- .../policy_funcs/gnu-api-tests/vlseg7e8.c | 128 +- .../policy_funcs/gnu-api-tests/vlseg7e8ff.c | 128 +- .../policy_funcs/gnu-api-tests/vlseg8e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg8e16ff.c | 144 +- .../policy_funcs/gnu-api-tests/vlseg8e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg8e32ff.c | 96 +- .../policy_funcs/gnu-api-tests/vlseg8e64.c | 48 +- .../policy_funcs/gnu-api-tests/vlseg8e64ff.c | 48 +- .../policy_funcs/gnu-api-tests/vlseg8e8.c | 128 +- .../policy_funcs/gnu-api-tests/vlseg8e8ff.c | 128 +- .../policy_funcs/gnu-api-tests/vlsseg2e16.c | 240 +-- .../policy_funcs/gnu-api-tests/vlsseg2e32.c | 192 +- .../policy_funcs/gnu-api-tests/vlsseg2e64.c | 144 +- .../policy_funcs/gnu-api-tests/vlsseg2e8.c | 192 +- .../policy_funcs/gnu-api-tests/vlsseg3e16.c | 192 +- .../policy_funcs/gnu-api-tests/vlsseg3e32.c | 144 +- .../policy_funcs/gnu-api-tests/vlsseg3e64.c | 96 +- .../policy_funcs/gnu-api-tests/vlsseg3e8.c | 160 +- .../policy_funcs/gnu-api-tests/vlsseg4e16.c | 192 +- .../policy_funcs/gnu-api-tests/vlsseg4e32.c | 144 +- .../policy_funcs/gnu-api-tests/vlsseg4e64.c | 96 +- .../policy_funcs/gnu-api-tests/vlsseg4e8.c | 160 +- .../policy_funcs/gnu-api-tests/vlsseg5e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlsseg5e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlsseg5e64.c | 48 +- .../policy_funcs/gnu-api-tests/vlsseg5e8.c | 128 +- .../policy_funcs/gnu-api-tests/vlsseg6e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlsseg6e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlsseg6e64.c | 48 +- .../policy_funcs/gnu-api-tests/vlsseg6e8.c | 128 +- .../policy_funcs/gnu-api-tests/vlsseg7e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlsseg7e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlsseg7e64.c | 48 +- .../policy_funcs/gnu-api-tests/vlsseg7e8.c | 128 +- .../policy_funcs/gnu-api-tests/vlsseg8e16.c | 144 +- .../policy_funcs/gnu-api-tests/vlsseg8e32.c | 96 +- .../policy_funcs/gnu-api-tests/vlsseg8e64.c | 48 +- .../policy_funcs/gnu-api-tests/vlsseg8e8.c | 128 +- .../policy_funcs/gnu-api-tests/vluxei16.c | 912 ++++---- .../policy_funcs/gnu-api-tests/vluxei32.c | 832 +++---- .../policy_funcs/gnu-api-tests/vluxei64.c | 704 +++--- .../policy_funcs/gnu-api-tests/vluxei8.c | 944 ++++---- .../policy_funcs/gnu-api-tests/vluxseg2ei16.c | 768 +++---- .../policy_funcs/gnu-api-tests/vluxseg2ei32.c | 736 +++---- .../policy_funcs/gnu-api-tests/vluxseg2ei64.c | 656 +++--- .../policy_funcs/gnu-api-tests/vluxseg2ei8.c | 768 +++---- .../policy_funcs/gnu-api-tests/vluxseg3ei16.c | 592 ++--- .../policy_funcs/gnu-api-tests/vluxseg3ei32.c | 592 ++--- .../policy_funcs/gnu-api-tests/vluxseg3ei64.c | 560 ++--- 
.../policy_funcs/gnu-api-tests/vluxseg3ei8.c | 592 ++--- .../policy_funcs/gnu-api-tests/vluxseg4ei16.c | 592 ++--- .../policy_funcs/gnu-api-tests/vluxseg4ei32.c | 592 ++--- .../policy_funcs/gnu-api-tests/vluxseg4ei64.c | 560 ++--- .../policy_funcs/gnu-api-tests/vluxseg4ei8.c | 592 ++--- .../policy_funcs/gnu-api-tests/vluxseg5ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg5ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg5ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg5ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg6ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg6ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg6ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg6ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg7ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg7ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg7ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg7ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg8ei16.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg8ei32.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg8ei64.c | 416 ++-- .../policy_funcs/gnu-api-tests/vluxseg8ei8.c | 416 ++-- .../policy_funcs/gnu-api-tests/vmacc.c | 1056 ++++----- .../policy_funcs/gnu-api-tests/vmadd.c | 1056 ++++----- .../policy_funcs/gnu-api-tests/vmax.c | 704 +++--- .../policy_funcs/gnu-api-tests/vmaxu.c | 704 +++--- .../policy_funcs/gnu-api-tests/vmerge.c | 412 ++-- .../policy_funcs/gnu-api-tests/vmfeq.c | 120 +- .../policy_funcs/gnu-api-tests/vmfge.c | 120 +- .../policy_funcs/gnu-api-tests/vmfgt.c | 120 +- .../policy_funcs/gnu-api-tests/vmfle.c | 120 +- .../policy_funcs/gnu-api-tests/vmflt.c | 120 +- .../policy_funcs/gnu-api-tests/vmfne.c | 120 +- .../policy_funcs/gnu-api-tests/vmin.c | 704 +++--- .../policy_funcs/gnu-api-tests/vminu.c | 704 +++--- .../policy_funcs/gnu-api-tests/vmsbf.c | 28 +- .../policy_funcs/gnu-api-tests/vmseq.c | 352 +-- .../policy_funcs/gnu-api-tests/vmsge.c | 176 +- .../policy_funcs/gnu-api-tests/vmsgeu.c | 176 +- .../policy_funcs/gnu-api-tests/vmsgt.c | 176 +- .../policy_funcs/gnu-api-tests/vmsgtu.c | 176 +- .../policy_funcs/gnu-api-tests/vmsif.c | 28 +- .../policy_funcs/gnu-api-tests/vmsle.c | 176 +- .../policy_funcs/gnu-api-tests/vmsleu.c | 176 +- .../policy_funcs/gnu-api-tests/vmslt.c | 176 +- .../policy_funcs/gnu-api-tests/vmsltu.c | 176 +- .../policy_funcs/gnu-api-tests/vmsne.c | 352 +-- .../policy_funcs/gnu-api-tests/vmsof.c | 28 +- .../policy_funcs/gnu-api-tests/vmul.c | 1408 ++++++------ .../policy_funcs/gnu-api-tests/vmulh.c | 704 +++--- .../policy_funcs/gnu-api-tests/vmulhsu.c | 704 +++--- .../policy_funcs/gnu-api-tests/vmulhu.c | 704 +++--- .../policy_funcs/gnu-api-tests/vmv.c | 588 ++--- .../policy_funcs/gnu-api-tests/vnclip.c | 480 ++--- .../policy_funcs/gnu-api-tests/vnclipu.c | 480 ++--- .../policy_funcs/gnu-api-tests/vncvt.c | 480 ++--- .../policy_funcs/gnu-api-tests/vneg.c | 352 +-- .../policy_funcs/gnu-api-tests/vnmsac.c | 1056 ++++----- .../policy_funcs/gnu-api-tests/vnmsub.c | 1056 ++++----- .../policy_funcs/gnu-api-tests/vnot.c | 704 +++--- .../policy_funcs/gnu-api-tests/vnsra.c | 480 ++--- .../policy_funcs/gnu-api-tests/vnsrl.c | 480 ++--- .../policy_funcs/gnu-api-tests/vor.c | 1408 ++++++------ .../policy_funcs/gnu-api-tests/vredand.c | 352 +-- .../policy_funcs/gnu-api-tests/vredmax.c | 176 +- .../policy_funcs/gnu-api-tests/vredmaxu.c | 176 +- .../policy_funcs/gnu-api-tests/vredmin.c | 176 +- .../policy_funcs/gnu-api-tests/vredminu.c | 176 +- 
 .../policy_funcs/gnu-api-tests/vredor.c | 352 +--
 .../policy_funcs/gnu-api-tests/vredsum.c | 352 +--
 .../policy_funcs/gnu-api-tests/vredxor.c | 352 +--
 .../policy_funcs/gnu-api-tests/vrem.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vremu.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vrgather.c | 1888 ++++++++--------
 .../policy_funcs/gnu-api-tests/vrgatherei16.c | 912 ++++----
 .../policy_funcs/gnu-api-tests/vrsub.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vsadd.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vsaddu.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vsbc.c | 352 +--
 .../policy_funcs/gnu-api-tests/vsext_vf2.c | 240 +--
 .../policy_funcs/gnu-api-tests/vsext_vf4.c | 144 +-
 .../policy_funcs/gnu-api-tests/vsext_vf8.c | 64 +-
 .../policy_funcs/gnu-api-tests/vslide1down.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vslide1up.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vslidedown.c | 944 ++++----
 .../policy_funcs/gnu-api-tests/vslideup.c | 944 ++++----
 .../policy_funcs/gnu-api-tests/vsll.c | 1408 ++++++------
 .../policy_funcs/gnu-api-tests/vsmul.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vsra.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vsrl.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vssra.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vssrl.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vssub.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vssubu.c | 704 +++---
 .../policy_funcs/gnu-api-tests/vsub.c | 1408 ++++++------
 .../policy_funcs/gnu-api-tests/vwadd.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vwaddu.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vwcvt.c | 240 +--
 .../policy_funcs/gnu-api-tests/vwcvtu.c | 240 +--
 .../policy_funcs/gnu-api-tests/vwmacc.c | 360 ++--
 .../policy_funcs/gnu-api-tests/vwmaccsu.c | 360 ++--
 .../policy_funcs/gnu-api-tests/vwmaccu.c | 360 ++--
 .../policy_funcs/gnu-api-tests/vwmaccus.c | 180 +-
 .../policy_funcs/gnu-api-tests/vwmul.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vwmulsu.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vwmulu.c | 480 ++---
 .../policy_funcs/gnu-api-tests/vwredsum.c | 144 +-
 .../policy_funcs/gnu-api-tests/vwredsumu.c | 144 +-
 .../policy_funcs/gnu-api-tests/vwsub.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vwsubu.c | 960 ++++-----
 .../policy_funcs/gnu-api-tests/vxor.c | 1408 ++++++------
 .../policy_funcs/gnu-api-tests/vzext_vf2.c | 240 +--
 .../policy_funcs/gnu-api-tests/vzext_vf4.c | 144 +-
 .../policy_funcs/gnu-api-tests/vzext_vf8.c | 64 +-
 .../policy_funcs/gnu-overloaded-tests/vaadd.c | 704 +++---
 .../gnu-overloaded-tests/vaaddu.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vadc.c | 352 +--
 .../policy_funcs/gnu-overloaded-tests/vadd.c | 1408 ++++++------
 .../policy_funcs/gnu-overloaded-tests/vand.c | 1408 ++++++------
 .../policy_funcs/gnu-overloaded-tests/vasub.c | 704 +++---
 .../gnu-overloaded-tests/vasubu.c | 704 +++---
 .../gnu-overloaded-tests/vcompress.c | 236 +-
 .../policy_funcs/gnu-overloaded-tests/vdiv.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vdivu.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vfabs.c | 240 +--
 .../policy_funcs/gnu-overloaded-tests/vfadd.c | 960 ++++-----
 .../gnu-overloaded-tests/vfclass.c | 240 +--
 .../policy_funcs/gnu-overloaded-tests/vfcvt.c | 1920 ++++++++---------
 .../gnu-overloaded-tests/vfcvt_rtz.c | 480 ++---
 .../policy_funcs/gnu-overloaded-tests/vfdiv.c | 960 ++++-----
 .../gnu-overloaded-tests/vfmacc.c | 720 +++----
 .../gnu-overloaded-tests/vfmadd.c | 720 +++----
 .../policy_funcs/gnu-overloaded-tests/vfmax.c | 480 ++---
 .../gnu-overloaded-tests/vfmerge.c | 60 +-
 .../policy_funcs/gnu-overloaded-tests/vfmin.c | 480 ++---
 .../gnu-overloaded-tests/vfmsac.c | 720 +++----
 .../gnu-overloaded-tests/vfmsub.c | 720 +++----
 .../policy_funcs/gnu-overloaded-tests/vfmul.c | 960 ++++-----
 .../policy_funcs/gnu-overloaded-tests/vfmv.c | 120 +-
 .../gnu-overloaded-tests/vfncvt.c | 1824 ++++++++--------
 .../gnu-overloaded-tests/vfncvt_rod.c | 144 +-
 .../gnu-overloaded-tests/vfncvt_rtz.c | 480 ++---
 .../policy_funcs/gnu-overloaded-tests/vfneg.c | 240 +--
 .../gnu-overloaded-tests/vfnmacc.c | 720 +++----
 .../gnu-overloaded-tests/vfnmadd.c | 720 +++----
 .../gnu-overloaded-tests/vfnmsac.c | 720 +++----
 .../gnu-overloaded-tests/vfnmsub.c | 720 +++----
 .../gnu-overloaded-tests/vfrdiv.c | 480 ++---
 .../gnu-overloaded-tests/vfrec7.c | 480 ++---
 .../gnu-overloaded-tests/vfredmax.c | 120 +-
 .../gnu-overloaded-tests/vfredmin.c | 120 +-
 .../gnu-overloaded-tests/vfredosum.c | 240 +--
 .../gnu-overloaded-tests/vfredusum.c | 240 +--
 .../gnu-overloaded-tests/vfrsqrt7.c | 240 +--
 .../gnu-overloaded-tests/vfrsub.c | 480 ++---
 .../gnu-overloaded-tests/vfsgnj.c | 480 ++---
 .../gnu-overloaded-tests/vfsgnjn.c | 480 ++---
 .../gnu-overloaded-tests/vfsgnjx.c | 480 ++---
 .../gnu-overloaded-tests/vfslide1down.c | 240 +--
 .../gnu-overloaded-tests/vfslide1up.c | 240 +--
 .../gnu-overloaded-tests/vfsqrt.c | 480 ++---
 .../policy_funcs/gnu-overloaded-tests/vfsub.c | 960 ++++-----
 .../gnu-overloaded-tests/vfwadd.c | 1152 +++++-----
 .../gnu-overloaded-tests/vfwcvt.c | 1200 +++++------
 .../gnu-overloaded-tests/vfwcvt_rtz.c | 288 +--
 .../gnu-overloaded-tests/vfwmacc.c | 432 ++--
 .../gnu-overloaded-tests/vfwmsac.c | 432 ++--
 .../gnu-overloaded-tests/vfwmul.c | 576 ++---
 .../gnu-overloaded-tests/vfwnmacc.c | 432 ++--
 .../gnu-overloaded-tests/vfwnmsac.c | 432 ++--
 .../gnu-overloaded-tests/vfwredosum.c | 176 +-
 .../gnu-overloaded-tests/vfwredusum.c | 176 +-
 .../gnu-overloaded-tests/vfwsub.c | 1152 +++++-----
 .../policy_funcs/gnu-overloaded-tests/vid.c | 352 +--
 .../policy_funcs/gnu-overloaded-tests/viota.c | 352 +--
 .../policy_funcs/gnu-overloaded-tests/vle16.c | 288 +--
 .../gnu-overloaded-tests/vle16ff.c | 288 +--
 .../policy_funcs/gnu-overloaded-tests/vle32.c | 240 +--
 .../gnu-overloaded-tests/vle32ff.c | 240 +--
 .../policy_funcs/gnu-overloaded-tests/vle64.c | 192 +-
 .../gnu-overloaded-tests/vle64ff.c | 192 +-
 .../policy_funcs/gnu-overloaded-tests/vle8.c | 224 +-
 .../gnu-overloaded-tests/vle8ff.c | 224 +-
 .../gnu-overloaded-tests/vloxei16.c | 912 ++++----
 .../gnu-overloaded-tests/vloxei32.c | 832 +++----
 .../gnu-overloaded-tests/vloxei64.c | 704 +++---
 .../gnu-overloaded-tests/vloxei8.c | 944 ++++----
 .../gnu-overloaded-tests/vloxseg2ei16.c | 768 +++----
 .../gnu-overloaded-tests/vloxseg2ei32.c | 736 +++----
 .../gnu-overloaded-tests/vloxseg2ei64.c | 656 +++---
 .../gnu-overloaded-tests/vloxseg2ei8.c | 768 +++----
 .../gnu-overloaded-tests/vloxseg3ei16.c | 592 ++---
 .../gnu-overloaded-tests/vloxseg3ei32.c | 592 ++---
 .../gnu-overloaded-tests/vloxseg3ei64.c | 560 ++---
 .../gnu-overloaded-tests/vloxseg3ei8.c | 592 ++---
 .../gnu-overloaded-tests/vloxseg4ei16.c | 592 ++---
 .../gnu-overloaded-tests/vloxseg4ei32.c | 592 ++---
 .../gnu-overloaded-tests/vloxseg4ei64.c | 560 ++---
 .../gnu-overloaded-tests/vloxseg4ei8.c | 592 ++---
 .../gnu-overloaded-tests/vloxseg5ei16.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg5ei32.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg5ei64.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg5ei8.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg6ei16.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg6ei32.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg6ei64.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg6ei8.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg7ei16.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg7ei32.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg7ei64.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg7ei8.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg8ei16.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg8ei32.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg8ei64.c | 416 ++--
 .../gnu-overloaded-tests/vloxseg8ei8.c | 416 ++--
 .../gnu-overloaded-tests/vlse16.c | 288 +--
 .../gnu-overloaded-tests/vlse32.c | 240 +--
 .../gnu-overloaded-tests/vlse64.c | 192 +-
 .../policy_funcs/gnu-overloaded-tests/vlse8.c | 224 +-
 .../gnu-overloaded-tests/vlseg2e16.c | 240 +--
 .../gnu-overloaded-tests/vlseg2e16ff.c | 240 +--
 .../gnu-overloaded-tests/vlseg2e32.c | 192 +-
 .../gnu-overloaded-tests/vlseg2e32ff.c | 192 +-
 .../gnu-overloaded-tests/vlseg2e64.c | 144 +-
 .../gnu-overloaded-tests/vlseg2e64ff.c | 144 +-
 .../gnu-overloaded-tests/vlseg2e8.c | 192 +-
 .../gnu-overloaded-tests/vlseg2e8ff.c | 192 +-
 .../gnu-overloaded-tests/vlseg3e16.c | 192 +-
 .../gnu-overloaded-tests/vlseg3e16ff.c | 192 +-
 .../gnu-overloaded-tests/vlseg3e32.c | 144 +-
 .../gnu-overloaded-tests/vlseg3e32ff.c | 144 +-
 .../gnu-overloaded-tests/vlseg3e64.c | 96 +-
 .../gnu-overloaded-tests/vlseg3e64ff.c | 96 +-
 .../gnu-overloaded-tests/vlseg3e8.c | 160 +-
 .../gnu-overloaded-tests/vlseg3e8ff.c | 160 +-
 .../gnu-overloaded-tests/vlseg4e16.c | 192 +-
 .../gnu-overloaded-tests/vlseg4e16ff.c | 192 +-
 .../gnu-overloaded-tests/vlseg4e32.c | 144 +-
 .../gnu-overloaded-tests/vlseg4e32ff.c | 144 +-
 .../gnu-overloaded-tests/vlseg4e64.c | 96 +-
 .../gnu-overloaded-tests/vlseg4e64ff.c | 96 +-
 .../gnu-overloaded-tests/vlseg4e8.c | 160 +-
 .../gnu-overloaded-tests/vlseg4e8ff.c | 160 +-
 .../gnu-overloaded-tests/vlseg5e16.c | 144 +-
 .../gnu-overloaded-tests/vlseg5e16ff.c | 144 +-
 .../gnu-overloaded-tests/vlseg5e32.c | 96 +-
 .../gnu-overloaded-tests/vlseg5e32ff.c | 96 +-
 .../gnu-overloaded-tests/vlseg5e64.c | 48 +-
 .../gnu-overloaded-tests/vlseg5e64ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg5e8.c | 128 +-
 .../gnu-overloaded-tests/vlseg5e8ff.c | 128 +-
 .../gnu-overloaded-tests/vlseg6e16.c | 144 +-
 .../gnu-overloaded-tests/vlseg6e16ff.c | 144 +-
 .../gnu-overloaded-tests/vlseg6e32.c | 96 +-
 .../gnu-overloaded-tests/vlseg6e32ff.c | 96 +-
 .../gnu-overloaded-tests/vlseg6e64.c | 48 +-
 .../gnu-overloaded-tests/vlseg6e64ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg6e8.c | 128 +-
 .../gnu-overloaded-tests/vlseg6e8ff.c | 128 +-
 .../gnu-overloaded-tests/vlseg7e16.c | 144 +-
 .../gnu-overloaded-tests/vlseg7e16ff.c | 144 +-
 .../gnu-overloaded-tests/vlseg7e32.c | 96 +-
 .../gnu-overloaded-tests/vlseg7e32ff.c | 96 +-
 .../gnu-overloaded-tests/vlseg7e64.c | 48 +-
 .../gnu-overloaded-tests/vlseg7e64ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg7e8.c | 128 +-
 .../gnu-overloaded-tests/vlseg7e8ff.c | 128 +-
 .../gnu-overloaded-tests/vlseg8e16.c | 144 +-
 .../gnu-overloaded-tests/vlseg8e16ff.c | 144 +-
 .../gnu-overloaded-tests/vlseg8e32.c | 96 +-
 .../gnu-overloaded-tests/vlseg8e32ff.c | 96 +-
 .../gnu-overloaded-tests/vlseg8e64.c | 48 +-
 .../gnu-overloaded-tests/vlseg8e64ff.c | 48 +-
 .../gnu-overloaded-tests/vlseg8e8.c | 128 +-
 .../gnu-overloaded-tests/vlseg8e8ff.c | 128 +-
 .../gnu-overloaded-tests/vlsseg2e16.c | 240 +--
 .../gnu-overloaded-tests/vlsseg2e32.c | 192 +-
 .../gnu-overloaded-tests/vlsseg2e64.c | 144 +-
 .../gnu-overloaded-tests/vlsseg2e8.c | 192 +-
 .../gnu-overloaded-tests/vlsseg3e16.c | 192 +-
 .../gnu-overloaded-tests/vlsseg3e32.c | 144 +-
 .../gnu-overloaded-tests/vlsseg3e64.c | 96 +-
 .../gnu-overloaded-tests/vlsseg3e8.c | 160 +-
 .../gnu-overloaded-tests/vlsseg4e16.c | 192 +-
 .../gnu-overloaded-tests/vlsseg4e32.c | 144 +-
 .../gnu-overloaded-tests/vlsseg4e64.c | 96 +-
 .../gnu-overloaded-tests/vlsseg4e8.c | 160 +-
 .../gnu-overloaded-tests/vlsseg5e16.c | 144 +-
 .../gnu-overloaded-tests/vlsseg5e32.c | 96 +-
 .../gnu-overloaded-tests/vlsseg5e64.c | 48 +-
 .../gnu-overloaded-tests/vlsseg5e8.c | 128 +-
 .../gnu-overloaded-tests/vlsseg6e16.c | 144 +-
 .../gnu-overloaded-tests/vlsseg6e32.c | 96 +-
 .../gnu-overloaded-tests/vlsseg6e64.c | 48 +-
 .../gnu-overloaded-tests/vlsseg6e8.c | 128 +-
 .../gnu-overloaded-tests/vlsseg7e16.c | 144 +-
 .../gnu-overloaded-tests/vlsseg7e32.c | 96 +-
 .../gnu-overloaded-tests/vlsseg7e64.c | 48 +-
 .../gnu-overloaded-tests/vlsseg7e8.c | 128 +-
 .../gnu-overloaded-tests/vlsseg8e16.c | 144 +-
 .../gnu-overloaded-tests/vlsseg8e32.c | 96 +-
 .../gnu-overloaded-tests/vlsseg8e64.c | 48 +-
 .../gnu-overloaded-tests/vlsseg8e8.c | 128 +-
 .../gnu-overloaded-tests/vluxei16.c | 912 ++++----
 .../gnu-overloaded-tests/vluxei32.c | 832 +++----
 .../gnu-overloaded-tests/vluxei64.c | 704 +++---
 .../gnu-overloaded-tests/vluxei8.c | 944 ++++----
 .../gnu-overloaded-tests/vluxseg2ei16.c | 768 +++----
 .../gnu-overloaded-tests/vluxseg2ei32.c | 736 +++----
 .../gnu-overloaded-tests/vluxseg2ei64.c | 656 +++---
 .../gnu-overloaded-tests/vluxseg2ei8.c | 768 +++----
 .../gnu-overloaded-tests/vluxseg3ei16.c | 592 ++---
 .../gnu-overloaded-tests/vluxseg3ei32.c | 592 ++---
 .../gnu-overloaded-tests/vluxseg3ei64.c | 560 ++---
 .../gnu-overloaded-tests/vluxseg3ei8.c | 592 ++---
 .../gnu-overloaded-tests/vluxseg4ei16.c | 592 ++---
 .../gnu-overloaded-tests/vluxseg4ei32.c | 592 ++---
 .../gnu-overloaded-tests/vluxseg4ei64.c | 560 ++---
 .../gnu-overloaded-tests/vluxseg4ei8.c | 592 ++---
 .../gnu-overloaded-tests/vluxseg5ei16.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg5ei32.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg5ei64.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg5ei8.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg6ei16.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg6ei32.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg6ei64.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg6ei8.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg7ei16.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg7ei32.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg7ei64.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg7ei8.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg8ei16.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg8ei32.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg8ei64.c | 416 ++--
 .../gnu-overloaded-tests/vluxseg8ei8.c | 416 ++--
 .../policy_funcs/gnu-overloaded-tests/vmacc.c | 1056 ++++-----
 .../policy_funcs/gnu-overloaded-tests/vmadd.c | 1056 ++++-----
 .../policy_funcs/gnu-overloaded-tests/vmax.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vmaxu.c | 704 +++---
 .../gnu-overloaded-tests/vmerge.c | 412 ++--
 .../policy_funcs/gnu-overloaded-tests/vmfeq.c | 120 +-
 .../policy_funcs/gnu-overloaded-tests/vmfge.c | 120 +-
 .../policy_funcs/gnu-overloaded-tests/vmfgt.c | 120 +-
 .../policy_funcs/gnu-overloaded-tests/vmfle.c | 120 +-
 .../policy_funcs/gnu-overloaded-tests/vmflt.c | 120 +-
 .../policy_funcs/gnu-overloaded-tests/vmfne.c | 120 +-
 .../policy_funcs/gnu-overloaded-tests/vmin.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vminu.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vmsbf.c | 28 +-
 .../policy_funcs/gnu-overloaded-tests/vmseq.c | 352 +--
 .../policy_funcs/gnu-overloaded-tests/vmsge.c | 176 +-
 .../gnu-overloaded-tests/vmsgeu.c | 176 +-
 .../policy_funcs/gnu-overloaded-tests/vmsgt.c | 176 +-
 .../gnu-overloaded-tests/vmsgtu.c | 176 +-
 .../policy_funcs/gnu-overloaded-tests/vmsif.c | 28 +-
 .../policy_funcs/gnu-overloaded-tests/vmsle.c | 176 +-
 .../gnu-overloaded-tests/vmsleu.c | 176 +-
 .../policy_funcs/gnu-overloaded-tests/vmslt.c | 176 +-
 .../gnu-overloaded-tests/vmsltu.c | 176 +-
 .../policy_funcs/gnu-overloaded-tests/vmsne.c | 352 +--
 .../policy_funcs/gnu-overloaded-tests/vmsof.c | 28 +-
 .../policy_funcs/gnu-overloaded-tests/vmul.c | 1408 ++++++------
 .../policy_funcs/gnu-overloaded-tests/vmulh.c | 704 +++---
 .../gnu-overloaded-tests/vmulhsu.c | 704 +++---
 .../gnu-overloaded-tests/vmulhu.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vmv.c | 588 ++---
 .../gnu-overloaded-tests/vnclip.c | 480 ++---
 .../gnu-overloaded-tests/vnclipu.c | 480 ++---
 .../policy_funcs/gnu-overloaded-tests/vncvt.c | 480 ++---
 .../policy_funcs/gnu-overloaded-tests/vneg.c | 352 +--
 .../gnu-overloaded-tests/vnmsac.c | 1056 ++++-----
 .../gnu-overloaded-tests/vnmsub.c | 1056 ++++-----
 .../policy_funcs/gnu-overloaded-tests/vnot.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vnsra.c | 480 ++---
 .../policy_funcs/gnu-overloaded-tests/vnsrl.c | 480 ++---
 .../policy_funcs/gnu-overloaded-tests/vor.c | 1408 ++++++------
 .../gnu-overloaded-tests/vredand.c | 352 +--
 .../gnu-overloaded-tests/vredmax.c | 176 +-
 .../gnu-overloaded-tests/vredmaxu.c | 176 +-
 .../gnu-overloaded-tests/vredmin.c | 176 +-
 .../gnu-overloaded-tests/vredminu.c | 176 +-
 .../gnu-overloaded-tests/vredor.c | 352 +--
 .../gnu-overloaded-tests/vredsum.c | 352 +--
 .../gnu-overloaded-tests/vredxor.c | 352 +--
 .../policy_funcs/gnu-overloaded-tests/vrem.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vremu.c | 704 +++---
 .../gnu-overloaded-tests/vrgather.c | 1888 ++++++++--------
 .../gnu-overloaded-tests/vrgatherei16.c | 912 ++++----
 .../policy_funcs/gnu-overloaded-tests/vrsub.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vsadd.c | 704 +++---
 .../gnu-overloaded-tests/vsaddu.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vsbc.c | 352 +--
 .../gnu-overloaded-tests/vsext_vf2.c | 240 +--
 .../gnu-overloaded-tests/vsext_vf4.c | 144 +-
 .../gnu-overloaded-tests/vsext_vf8.c | 64 +-
 .../gnu-overloaded-tests/vslide1down.c | 704 +++---
 .../gnu-overloaded-tests/vslide1up.c | 704 +++---
 .../gnu-overloaded-tests/vslidedown.c | 944 ++++----
 .../gnu-overloaded-tests/vslideup.c | 944 ++++----
 .../policy_funcs/gnu-overloaded-tests/vsll.c | 1408 ++++++------
 .../policy_funcs/gnu-overloaded-tests/vsmul.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vsra.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vsrl.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vssra.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vssrl.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vssub.c | 704 +++---
 .../gnu-overloaded-tests/vssubu.c | 704 +++---
 .../policy_funcs/gnu-overloaded-tests/vsub.c | 1408 ++++++------
 .../policy_funcs/gnu-overloaded-tests/vwadd.c | 960 ++++-----
 .../gnu-overloaded-tests/vwaddu.c | 960 ++++-----
 .../policy_funcs/gnu-overloaded-tests/vwcvt.c | 240 +--
 .../gnu-overloaded-tests/vwcvtu.c | 240 +--
 .../gnu-overloaded-tests/vwmacc.c | 360 ++--
 .../gnu-overloaded-tests/vwmaccsu.c | 360 ++--
 .../gnu-overloaded-tests/vwmaccu.c | 360 ++--
 .../gnu-overloaded-tests/vwmaccus.c | 180 +-
 .../policy_funcs/gnu-overloaded-tests/vwmul.c | 480 ++---
 .../gnu-overloaded-tests/vwmulsu.c | 480 ++---
 .../gnu-overloaded-tests/vwmulu.c | 480 ++---
 .../gnu-overloaded-tests/vwredsum.c | 144 +-
 .../gnu-overloaded-tests/vwredsumu.c | 144 +-
 .../policy_funcs/gnu-overloaded-tests/vwsub.c | 960 ++++-----
 .../gnu-overloaded-tests/vwsubu.c | 960 ++++-----
 .../policy_funcs/gnu-overloaded-tests/vxor.c | 1408 ++++++------
 .../gnu-overloaded-tests/vzext_vf2.c | 240 +--
 .../gnu-overloaded-tests/vzext_vf4.c | 144 +-
 .../gnu-overloaded-tests/vzext_vf8.c | 64 +-
 1543 files changed, 229836 insertions(+), 229836 deletions(-)

diff --git a/auto-generated/gnu-api-tests/vaadd.c b/auto-generated/gnu-api-tests/vaadd.c
index f760c667a..254b44fb2 100644
--- a/auto-generated/gnu-api-tests/vaadd.c
+++ b/auto-generated/gnu-api-tests/vaadd.c
@@ -6,356 +6,356 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vaadd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vaadd_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vaadd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vaadd_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vaadd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vaadd_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i16m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vaadd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vaadd_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vaadd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vaadd_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vaadd_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i32m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vaadd_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i32m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vaadd_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i32m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vaadd_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i32m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vaadd_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i32m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vaadd_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i32m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vaadd_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i32m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vaadd_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i32m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i64m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i64m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i64m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i64m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i64m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i64m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i64m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i64m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i8m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i8m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vaadd_vv_i16m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i16m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vaadd_vx_i32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i32m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i32m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i64m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i64m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i64m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i64m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i64m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i64m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vaadd_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
  return __riscv_vaadd_vv_i64m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd_vx_i64m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaadd\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vaaddu.c b/auto-generated/gnu-api-tests/vaaddu.c
index 1805fbf45..f67520995 100644
--- a/auto-generated/gnu-api-tests/vaaddu.c
+++ b/auto-generated/gnu-api-tests/vaaddu.c
@@ -6,356 +6,356 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vaaddu_vx_u16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vaaddu_vv_u16m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m8_m(mask, op1,
op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vaaddu_vx_u32m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaaddu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vadc.c b/auto-generated/gnu-api-tests/vadc.c index 32f9cc451..f4c292784 100644 --- a/auto-generated/gnu-api-tests/vadc.c +++ b/auto-generated/gnu-api-tests/vadc.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_i8mf8(op1, op2, carryin, vl); +vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_i8mf8(vs2, vs1, v0, vl); } -vint8mf8_t 
test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_i8mf8(op1, op2, carryin, vl); +vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_i8mf8(vs2, rs1, v0, vl); } -vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_i8mf4(op1, op2, carryin, vl); +vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_i8mf4(vs2, vs1, v0, vl); } -vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_i8mf4(op1, op2, carryin, vl); +vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_i8mf4(vs2, rs1, v0, vl); } -vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_i8mf2(op1, op2, carryin, vl); +vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_i8mf2(vs2, vs1, v0, vl); } -vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_i8mf2(op1, op2, carryin, vl); +vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_i8mf2(vs2, rs1, v0, vl); } -vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_i8m1(op1, op2, carryin, vl); +vint8m1_t test_vadc_vvm_i8m1(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_i8m1(vs2, vs1, v0, vl); } -vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_i8m1(op1, op2, carryin, vl); +vint8m1_t test_vadc_vxm_i8m1(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_i8m1(vs2, rs1, v0, vl); } -vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vvm_i8m2(op1, op2, carryin, vl); +vint8m2_t test_vadc_vvm_i8m2(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vvm_i8m2(vs2, vs1, v0, vl); } -vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vxm_i8m2(op1, op2, carryin, vl); +vint8m2_t test_vadc_vxm_i8m2(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vxm_i8m2(vs2, rs1, v0, vl); } -vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vvm_i8m4(op1, op2, carryin, vl); +vint8m4_t test_vadc_vvm_i8m4(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vvm_i8m4(vs2, vs1, v0, vl); } -vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vxm_i8m4(op1, op2, carryin, vl); +vint8m4_t test_vadc_vxm_i8m4(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vxm_i8m4(vs2, rs1, v0, vl); } -vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc_vvm_i8m8(op1, op2, carryin, vl); +vint8m8_t test_vadc_vvm_i8m8(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vadc_vvm_i8m8(vs2, vs1, v0, vl); } -vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc_vxm_i8m8(op1, op2, carryin, 
vl); +vint8m8_t test_vadc_vxm_i8m8(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vadc_vxm_i8m8(vs2, rs1, v0, vl); } -vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_i16mf4(op1, op2, carryin, vl); +vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_i16mf4(vs2, vs1, v0, vl); } -vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_i16mf4(op1, op2, carryin, vl); +vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_i16mf4(vs2, rs1, v0, vl); } -vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_i16mf2(op1, op2, carryin, vl); +vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_i16mf2(vs2, vs1, v0, vl); } -vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_i16mf2(op1, op2, carryin, vl); +vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_i16mf2(vs2, rs1, v0, vl); } -vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_i16m1(op1, op2, carryin, vl); +vint16m1_t test_vadc_vvm_i16m1(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_i16m1(vs2, vs1, v0, vl); } -vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_i16m1(op1, op2, carryin, vl); +vint16m1_t test_vadc_vxm_i16m1(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_i16m1(vs2, rs1, v0, vl); } -vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_i16m2(op1, op2, carryin, vl); +vint16m2_t test_vadc_vvm_i16m2(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_i16m2(vs2, vs1, v0, vl); } -vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_i16m2(op1, op2, carryin, vl); +vint16m2_t test_vadc_vxm_i16m2(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_i16m2(vs2, rs1, v0, vl); } -vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vvm_i16m4(op1, op2, carryin, vl); +vint16m4_t test_vadc_vvm_i16m4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vvm_i16m4(vs2, vs1, v0, vl); } -vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vxm_i16m4(op1, op2, carryin, vl); +vint16m4_t test_vadc_vxm_i16m4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vxm_i16m4(vs2, rs1, v0, vl); } -vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vvm_i16m8(op1, op2, carryin, vl); +vint16m8_t test_vadc_vvm_i16m8(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vvm_i16m8(vs2, vs1, v0, vl); } -vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vxm_i16m8(op1, op2, carryin, vl); +vint16m8_t 
test_vadc_vxm_i16m8(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vxm_i16m8(vs2, rs1, v0, vl); } -vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_i32mf2(op1, op2, carryin, vl); +vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_i32mf2(vs2, vs1, v0, vl); } -vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_i32mf2(op1, op2, carryin, vl); +vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_i32mf2(vs2, rs1, v0, vl); } -vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_i32m1(op1, op2, carryin, vl); +vint32m1_t test_vadc_vvm_i32m1(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_i32m1(vs2, vs1, v0, vl); } -vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_i32m1(op1, op2, carryin, vl); +vint32m1_t test_vadc_vxm_i32m1(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_i32m1(vs2, rs1, v0, vl); } -vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_i32m2(op1, op2, carryin, vl); +vint32m2_t test_vadc_vvm_i32m2(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_i32m2(vs2, vs1, v0, vl); } -vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_i32m2(op1, op2, carryin, vl); +vint32m2_t test_vadc_vxm_i32m2(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_i32m2(vs2, rs1, v0, vl); } -vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_i32m4(op1, op2, carryin, vl); +vint32m4_t test_vadc_vvm_i32m4(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_i32m4(vs2, vs1, v0, vl); } -vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_i32m4(op1, op2, carryin, vl); +vint32m4_t test_vadc_vxm_i32m4(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_i32m4(vs2, rs1, v0, vl); } -vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vvm_i32m8(op1, op2, carryin, vl); +vint32m8_t test_vadc_vvm_i32m8(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vvm_i32m8(vs2, vs1, v0, vl); } -vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vxm_i32m8(op1, op2, carryin, vl); +vint32m8_t test_vadc_vxm_i32m8(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vxm_i32m8(vs2, rs1, v0, vl); } -vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_i64m1(op1, op2, carryin, vl); +vint64m1_t test_vadc_vvm_i64m1(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_i64m1(vs2, vs1, v0, vl); } -vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_i64m1(op1, op2, carryin, vl); +vint64m1_t 
test_vadc_vxm_i64m1(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_i64m1(vs2, rs1, v0, vl); } -vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_i64m2(op1, op2, carryin, vl); +vint64m2_t test_vadc_vvm_i64m2(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_i64m2(vs2, vs1, v0, vl); } -vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_i64m2(op1, op2, carryin, vl); +vint64m2_t test_vadc_vxm_i64m2(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_i64m2(vs2, rs1, v0, vl); } -vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_i64m4(op1, op2, carryin, vl); +vint64m4_t test_vadc_vvm_i64m4(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_i64m4(vs2, vs1, v0, vl); } -vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_i64m4(op1, op2, carryin, vl); +vint64m4_t test_vadc_vxm_i64m4(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_i64m4(vs2, rs1, v0, vl); } -vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_i64m8(op1, op2, carryin, vl); +vint64m8_t test_vadc_vvm_i64m8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_i64m8(vs2, vs1, v0, vl); } -vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_i64m8(op1, op2, carryin, vl); +vint64m8_t test_vadc_vxm_i64m8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_i64m8(vs2, rs1, v0, vl); } -vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_u8mf8(op1, op2, carryin, vl); +vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_u8mf8(vs2, vs1, v0, vl); } -vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_u8mf8(op1, op2, carryin, vl); +vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_u8mf8(vs2, rs1, v0, vl); } -vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_u8mf4(op1, op2, carryin, vl); +vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_u8mf4(vs2, vs1, v0, vl); } -vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_u8mf4(op1, op2, carryin, vl); +vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_u8mf4(vs2, rs1, v0, vl); } -vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_u8mf2(op1, op2, carryin, vl); +vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_u8mf2(vs2, vs1, v0, vl); } -vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_u8mf2(op1, op2, carryin, vl); +vuint8mf2_t 
test_vadc_vxm_u8mf2(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_u8mf2(vs2, rs1, v0, vl); } -vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_u8m1(op1, op2, carryin, vl); +vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_u8m1(vs2, vs1, v0, vl); } -vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_u8m1(op1, op2, carryin, vl); +vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_u8m1(vs2, rs1, v0, vl); } -vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vvm_u8m2(op1, op2, carryin, vl); +vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vvm_u8m2(vs2, vs1, v0, vl); } -vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vxm_u8m2(op1, op2, carryin, vl); +vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vxm_u8m2(vs2, rs1, v0, vl); } -vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vvm_u8m4(op1, op2, carryin, vl); +vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vvm_u8m4(vs2, vs1, v0, vl); } -vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vxm_u8m4(op1, op2, carryin, vl); +vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vxm_u8m4(vs2, rs1, v0, vl); } -vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc_vvm_u8m8(op1, op2, carryin, vl); +vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vadc_vvm_u8m8(vs2, vs1, v0, vl); } -vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc_vxm_u8m8(op1, op2, carryin, vl); +vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vadc_vxm_u8m8(vs2, rs1, v0, vl); } -vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_u16mf4(op1, op2, carryin, vl); +vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_u16mf4(vs2, vs1, v0, vl); } -vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_u16mf4(op1, op2, carryin, vl); +vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_u16mf4(vs2, rs1, v0, vl); } -vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_u16mf2(op1, op2, carryin, vl); +vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_u16mf2(vs2, vs1, v0, vl); } -vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_u16mf2(op1, op2, carryin, vl); +vuint16mf2_t 
test_vadc_vxm_u16mf2(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_u16mf2(vs2, rs1, v0, vl); } -vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_u16m1(op1, op2, carryin, vl); +vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_u16m1(vs2, vs1, v0, vl); } -vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_u16m1(op1, op2, carryin, vl); +vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_u16m1(vs2, rs1, v0, vl); } -vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_u16m2(op1, op2, carryin, vl); +vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_u16m2(vs2, vs1, v0, vl); } -vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_u16m2(op1, op2, carryin, vl); +vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_u16m2(vs2, rs1, v0, vl); } -vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vvm_u16m4(op1, op2, carryin, vl); +vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vvm_u16m4(vs2, vs1, v0, vl); } -vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vxm_u16m4(op1, op2, carryin, vl); +vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vxm_u16m4(vs2, rs1, v0, vl); } -vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vvm_u16m8(op1, op2, carryin, vl); +vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vvm_u16m8(vs2, vs1, v0, vl); } -vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_vxm_u16m8(op1, op2, carryin, vl); +vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_vxm_u16m8(vs2, rs1, v0, vl); } -vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_u32mf2(op1, op2, carryin, vl); +vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_u32mf2(vs2, vs1, v0, vl); } -vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_u32mf2(op1, op2, carryin, vl); +vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_u32mf2(vs2, rs1, v0, vl); } -vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_u32m1(op1, op2, carryin, vl); +vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_u32m1(vs2, vs1, v0, vl); } -vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) { - return 
__riscv_vadc_vxm_u32m1(op1, op2, carryin, vl); +vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_u32m1(vs2, rs1, v0, vl); } -vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_u32m2(op1, op2, carryin, vl); +vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_u32m2(vs2, vs1, v0, vl); } -vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_u32m2(op1, op2, carryin, vl); +vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_u32m2(vs2, rs1, v0, vl); } -vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_u32m4(op1, op2, carryin, vl); +vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_u32m4(vs2, vs1, v0, vl); } -vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_u32m4(op1, op2, carryin, vl); +vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_u32m4(vs2, rs1, v0, vl); } -vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vvm_u32m8(op1, op2, carryin, vl); +vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vvm_u32m8(vs2, vs1, v0, vl); } -vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_vxm_u32m8(op1, op2, carryin, vl); +vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_vxm_u32m8(vs2, rs1, v0, vl); } -vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vvm_u64m1(op1, op2, carryin, vl); +vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m1(vs2, vs1, v0, vl); } -vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_u64m1(op1, op2, carryin, vl); +vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m1(vs2, rs1, v0, vl); } -vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_u64m2(op1, op2, carryin, vl); +vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m2(vs2, vs1, v0, vl); } -vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_u64m2(op1, op2, carryin, vl); +vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m2(vs2, rs1, v0, vl); } -vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_u64m4(op1, op2, carryin, vl); +vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m4(vs2, vs1, v0, vl); } -vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t 
carryin, size_t vl) { - return __riscv_vadc_vxm_u64m4(op1, op2, carryin, vl); +vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m4(vs2, rs1, v0, vl); } -vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_u64m8(op1, op2, carryin, vl); +vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m8(vs2, vs1, v0, vl); } -vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_u64m8(op1, op2, carryin, vl); +vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m8(vs2, rs1, v0, vl); } /* { dg-final { scan-assembler-times {vadc\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vadd.c b/auto-generated/gnu-api-tests/vadd.c index aa1ef98da..2f1800adb 100644 --- a/auto-generated/gnu-api-tests/vadd.c +++ b/auto-generated/gnu-api-tests/vadd.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vadd_vv_i8m1(op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m1(op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd_vv_i8m2(op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m2(op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vadd_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd_vv_i8m4(op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m4(op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd_vv_i8m8(op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m8(op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd_vv_i16m1(op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m1(op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd_vv_i16m2(op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m2(op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd_vv_i16m4(op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m4(op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd_vv_i16m8(op1, op2, 
vl); +vint16m8_t test_vadd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m8(op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd_vv_i32m1(op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m1(op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd_vv_i32m2(op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m2(op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd_vv_i32m4(op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m4(op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd_vv_i32m8(op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m8(op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd_vv_i64m1(op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m1(op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd_vv_i64m2(op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m2(vs2, vs1, vl); } 
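
For reference, a minimal standalone sketch of the operand-naming convention these tests now follow, assuming only the standard <riscv_vector.h> intrinsics already used above: vs2 names the vector source operand, vs1 (vector-vector forms) or rs1 (vector-scalar forms) names the second source, and vl is the active element count. The demo_* wrappers are hypothetical and not part of the generated tests.

#include <riscv_vector.h>

// vd[i] = vs2[i] + vs1[i] for the first vl elements
vint64m2_t demo_vadd_vv(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
  return __riscv_vadd_vv_i64m2(vs2, vs1, vl);
}

// vd[i] = vs2[i] + rs1 for the first vl elements
vint64m2_t demo_vadd_vx(vint64m2_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vadd_vx_i64m2(vs2, rs1, vl);
}
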
-vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m2(op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd_vv_i64m4(op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m4(op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd_vv_i64m8(op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m8(op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf8(op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf8(op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf4(op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf4(op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf2(op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf2(op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd_vv_u8m1(op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m1(op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd_vv_u8m2(op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m2(op1, op2, vl); +vuint8m2_t 
test_vadd_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd_vv_u8m4(op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m4(op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vadd_vv_u8m8(op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vadd_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vadd_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vadd_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vadd_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vadd_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vadd_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vadd_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vadd_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vadd_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vadd_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vadd_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vadd_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vadd_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m1(vs2, rs1, vl); } 
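
The masked (_m) variants that follow use the same scheme with one addition: the vbool*_t mask argument is renamed from mask to vm and remains the leading argument. A minimal hedged sketch, again with a hypothetical demo_* wrapper around an intrinsic taken verbatim from the tests below:

#include <riscv_vector.h>

// Only elements with vm[i] set are computed; inactive elements follow
// the default tail/mask policy of the plain _m form.
vint8mf8_t demo_vadd_vv_masked(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
  return __riscv_vadd_vv_i8mf8_m(vm, vs2, vs1, vl);
}
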
-vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m2(op1, op2, vl);
+vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m2(vs2, vs1, vl);
 }
-vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m2(op1, op2, vl);
+vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m2(vs2, rs1, vl);
 }
-vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m4(op1, op2, vl);
+vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m4(vs2, vs1, vl);
 }
-vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m4(op1, op2, vl);
+vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m4(vs2, rs1, vl);
 }
-vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m8(op1, op2, vl);
+vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m8(vs2, vs1, vl);
 }
-vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m8(op1, op2, vl);
+vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m8(vs2, rs1, vl);
 }
-vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf8_m(vm, vs2, vs1, vl);
 }
-vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf8_m(vm, vs2, rs1, vl);
 }
-vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf4_m(vm, vs2, vs1, vl);
 }
-vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf4_m(vm, vs2, rs1, vl);
 }
-vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf2_m(vm, vs2, vs1, vl);
 }
-vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf2_m(vm, vs2, rs1, vl);
 }
-vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m1_m(vm, vs2, vs1, vl);
 }
-vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m1_m(vm, vs2, rs1, vl);
 }
-vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m2_m(vm, vs2, vs1, vl);
 }
-vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m2_m(vm, vs2, rs1, vl);
 }
-vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m4_m(vm, vs2, vs1, vl);
 }
-vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m4_m(vm, vs2, rs1, vl);
 }
-vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vadd_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m8_m(vm, vs2, vs1, vl);
 }
-vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m8_m(vm, vs2, rs1, vl);
 }
-vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16mf4_m(vm, vs2, vs1, vl);
 }
-vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16mf4_m(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16mf2_m(vm, vs2, vs1, vl);
 }
-vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16mf2_m(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m1_m(vm, vs2, vs1, vl);
 }
-vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m1_m(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m2_m(vm, vs2, vs1, vl);
 }
-vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m2_m(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m4_m(vm, vs2, vs1, vl);
 }
-vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m4_m(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m8_m(vm, vs2, vs1, vl);
 }
-vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m8_m(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32mf2_m(vm, vs2, vs1, vl);
 }
-vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32mf2_m(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m1_m(vm, vs2, vs1, vl);
 }
-vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m1_m(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m2_m(vm, vs2, vs1, vl);
 }
-vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m2_m(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m4_m(vm, vs2, vs1, vl);
 }
-vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m8_m(vm, vs2, rs1, vl);
 }
-vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf8_m(vm, vs2, vs1, vl);
 }
-vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf8_m(vm, vs2, rs1, vl);
 }
-vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf4_m(vm, vs2, vs1, vl);
 }
-vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf4_m(vm, vs2, rs1, vl);
 }
-vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf2_m(vm, vs2, vs1, vl);
 }
-vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf2_m(vm, vs2, rs1, vl);
 }
-vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m1_m(vm, vs2, vs1, vl);
 }
-vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m1_m(vm, vs2, rs1, vl);
 }
-vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m2_m(vm, vs2, vs1, vl);
 }
-vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m2_m(vm, vs2, rs1, vl);
 }
-vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m4_m(vm, vs2, vs1, vl);
 }
-vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m4_m(vm, vs2, rs1, vl);
 }
-vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m8_m(vm, vs2, vs1, vl);
 }
-vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m8_m(vm, vs2, rs1, vl);
 }
-vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf4_m(vm, vs2, vs1, vl);
 }
-vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf4_m(vm, vs2, rs1, vl);
 }
-vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf2_m(vm, vs2, vs1, vl);
 }
-vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf2_m(vm, vs2, rs1, vl);
 }
-vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m1_m(vm, vs2, vs1, vl);
 }
-vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m1_m(vm, vs2, rs1, vl);
 }
-vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m2_m(vm, vs2, vs1, vl);
 }
-vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m2_m(vm, vs2, rs1, vl);
 }
-vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m4_m(vm, vs2, vs1, vl);
 }
-vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m4_m(vm, vs2, rs1, vl);
 }
-vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m8_m(vm, vs2, vs1, vl);
 }
-vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m8_m(vm, vs2, rs1, vl);
 }
-vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32mf2_m(vm, vs2, vs1, vl);
 }
-vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32mf2_m(vm, vs2, rs1, vl);
 }
-vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m1_m(vm, vs2, vs1, vl);
 }
-vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m1_m(vm, vs2, rs1, vl);
 }
-vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m2_m(vm, vs2, vs1, vl);
 }
-vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m2_m(vm, vs2, rs1, vl);
 }
-vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m4_m(vm, vs2, vs1, vl);
 }
-vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m4_m(vm, vs2, rs1, vl);
 }
-vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m8_m(vm, vs2, vs1, vl);
 }
-vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m8_m(vm, vs2, rs1, vl);
 }
-vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m1_m(vm, vs2, vs1, vl);
 }
-vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m1_m(vm, vs2, rs1, vl);
 }
-vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m2_m(vm, vs2, vs1, vl);
 }
-vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m2_m(vm, vs2, rs1, vl);
 }
-vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m4_m(vm, vs2, vs1, vl);
 }
-vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m4_m(vm, vs2, rs1, vl);
 }
-vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m8_m(vm, vs2, vs1, vl);
 }
-vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vadd\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vand.c b/auto-generated/gnu-api-tests/vand.c
index a1ac137f4..10608318a 100644
--- a/auto-generated/gnu-api-tests/vand.c
+++ b/auto-generated/gnu-api-tests/vand.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf8(op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf8(vs2, vs1, vl);
 }
-vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf8(op1, op2, vl);
+vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf8(vs2, rs1, vl);
 }
-vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf4(op1, op2, vl);
+vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf4(vs2, vs1, vl);
 }
-vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf4(op1, op2, vl);
+vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf4(vs2, rs1, vl);
 }
-vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf2(op1, op2, vl);
+vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf2(vs2, vs1, vl);
 }
-vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf2(op1, op2, vl);
+vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf2(vs2, rs1, vl);
 }
-vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vand_vv_i8m1(op1, op2, vl);
+vint8m1_t test_vand_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8m1(vs2, vs1, vl);
 }
-vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8m1(op1, op2, vl);
+vint8m1_t test_vand_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8m1(vs2, rs1, vl);
 }
-vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vand_vv_i8m2(op1, op2, vl);
+vint8m2_t test_vand_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8m2(vs2, vs1, vl);
 }
-vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8m2(op1, op2, vl);
+vint8m2_t test_vand_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8m2(vs2, rs1, vl);
 }
-vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vand_vv_i8m4(op1, op2, vl);
+vint8m4_t test_vand_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8m4(vs2, vs1, vl);
 }
-vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8m4(op1, op2, vl);
+vint8m4_t test_vand_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8m4(vs2, rs1, vl);
 }
-vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vand_vv_i8m8(op1, op2, vl);
+vint8m8_t test_vand_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8m8(vs2, vs1, vl);
 }
-vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8m8(op1, op2, vl);
+vint8m8_t test_vand_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8m8(vs2, rs1, vl);
 }
-vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_i16mf4(op1, op2, vl);
+vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i16mf4(vs2, vs1, vl);
 }
-vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_vx_i16mf4(op1, op2, vl);
+vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_vx_i16mf4(vs2, rs1, vl);
 }
-vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_i16mf2(op1, op2, vl);
+vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i16mf2(vs2, vs1, vl);
 }
-vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_vx_i16mf2(op1, op2, vl);
+vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_vx_i16mf2(vs2, rs1, vl);
 }
-vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vand_vv_i16m1(op1, op2, vl);
+vint16m1_t test_vand_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_i16m1(vs2, vs1, vl);
 }
-vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_vx_i16m1(op1, op2, vl);
+vint16m1_t test_vand_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_vx_i16m1(vs2, rs1, vl);
 }
-vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vand_vv_i16m2(op1, op2, vl);
+vint16m2_t test_vand_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i16m2(vs2, vs1, vl);
 }
-vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_vx_i16m2(op1, op2, vl);
+vint16m2_t test_vand_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_vx_i16m2(vs2, rs1, vl);
 }
-vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vand_vv_i16m4(op1, op2, vl);
+vint16m4_t test_vand_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i16m4(vs2, vs1, vl);
 }
-vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_vx_i16m4(op1, op2, vl);
+vint16m4_t test_vand_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_vx_i16m4(vs2, rs1, vl);
 }
-vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vand_vv_i16m8(op1, op2, vl);
+vint16m8_t test_vand_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i16m8(vs2, vs1, vl);
 }
-vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_vx_i16m8(op1, op2, vl);
+vint16m8_t test_vand_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_vx_i16m8(vs2, rs1, vl);
 }
-vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_i32mf2(op1, op2, vl);
+vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i32mf2(vs2, vs1, vl);
 }
-vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_vx_i32mf2(op1, op2, vl);
+vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_vx_i32mf2(vs2, rs1, vl);
 }
-vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vand_vv_i32m1(op1, op2, vl);
+vint32m1_t test_vand_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_i32m1(vs2, vs1, vl);
 }
-vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_vx_i32m1(op1, op2, vl);
+vint32m1_t test_vand_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_vx_i32m1(vs2, rs1, vl);
 }
-vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vand_vv_i32m2(op1, op2, vl);
+vint32m2_t test_vand_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i32m2(vs2, vs1, vl);
 }
-vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_vx_i32m2(op1, op2, vl);
+vint32m2_t test_vand_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_vx_i32m2(vs2, rs1, vl);
 }
-vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vand_vv_i32m4(op1, op2, vl);
+vint32m4_t test_vand_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i32m4(vs2, vs1, vl);
 }
-vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_vx_i32m4(op1, op2, vl);
+vint32m4_t test_vand_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_vx_i32m4(vs2, rs1, vl);
 }
-vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vand_vv_i32m8(op1, op2, vl);
+vint32m8_t test_vand_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i32m8(vs2, vs1, vl);
 }
-vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_vx_i32m8(op1, op2, vl);
+vint32m8_t test_vand_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_vx_i32m8(vs2, rs1, vl);
 }
-vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m1(op1, op2, vl);
+vint64m1_t test_vand_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m1(vs2, vs1, vl);
 }
-vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m1(op1, op2, vl);
+vint64m1_t test_vand_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m1(vs2, rs1, vl);
 }
-vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m2(op1, op2, vl);
+vint64m2_t test_vand_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m2(vs2, vs1, vl);
 }
-vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m2(op1, op2, vl);
+vint64m2_t test_vand_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m2(vs2, rs1, vl);
 }
-vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m4(op1, op2, vl);
+vint64m4_t test_vand_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m4(vs2, vs1, vl);
 }
-vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m4(op1, op2, vl);
+vint64m4_t test_vand_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m4(vs2, rs1, vl);
 }
-vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m8(op1, op2, vl);
+vint64m8_t test_vand_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m8(vs2, vs1, vl);
 }
-vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m8(op1, op2, vl);
+vint64m8_t test_vand_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m8(vs2, rs1, vl);
 }
-vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf8(vs2, vs1, vl);
 }
-vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf8(vs2, rs1, vl);
 }
-vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf4(vs2, vs1, vl);
 }
-vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf4(vs2, rs1, vl);
 }
-vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf2(vs2, vs1, vl);
 }
-vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf2(vs2, rs1, vl);
 }
-vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m1(op1, op2, vl);
+vuint8m1_t test_vand_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m1(vs2, vs1, vl);
 }
-vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m1(op1, op2, vl);
+vuint8m1_t test_vand_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m1(vs2, rs1, vl);
 }
-vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m2(op1, op2, vl);
+vuint8m2_t test_vand_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m2(vs2, vs1, vl);
 }
-vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m2(op1, op2, vl);
+vuint8m2_t test_vand_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m2(vs2, rs1, vl);
 }
-vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m4(op1, op2, vl);
+vuint8m4_t test_vand_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m4(vs2, vs1, vl);
 }
-vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m4(op1, op2, vl);
+vuint8m4_t test_vand_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m4(vs2, rs1, vl);
 }
-vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m8(op1, op2, vl);
+vuint8m8_t test_vand_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m8(vs2, vs1, vl);
 }
-vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m8(op1, op2, vl);
+vuint8m8_t test_vand_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m8(vs2, rs1, vl);
 }
-vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf4(vs2, vs1, vl);
 }
-vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf4(vs2, rs1, vl);
 }
-vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf2(vs2, vs1, vl);
 }
-vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vand_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vand_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vand_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vand_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vand_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vand_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vand_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vand_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vand_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vand_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vand_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vand_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vand_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vand_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vand_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2(vuint32m2_t 
vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vand_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vand_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vand_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vand_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vand_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vand_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vand_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vand_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vand_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vand_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vand_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vand_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vand_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m8(vs2, rs1, vl); } -vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vand_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t vm, 
vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vand_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vand_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vand_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vand_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vand_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, 
vint16mf4_t op2, size_t vl) { - return __riscv_vand_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return 
__riscv_vand_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vand_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t 
+  return __riscv_vand_vv_i64m2_m(vm, vs2, vs1, vl);
 }

-vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vand_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m2_m(vm, vs2, rs1, vl);
 }

-vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vand_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m4_m(vm, vs2, vs1, vl);
 }

-vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vand_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m4_m(vm, vs2, rs1, vl);
 }

-vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vand_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m8_m(vm, vs2, vs1, vl);
 }

-vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vand_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m8_m(vm, vs2, rs1, vl);
 }

-vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf8_m(vm, vs2, vs1, vl);
 }

-vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf8_m(vm, vs2, rs1, vl);
 }

-vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf4_m(vm, vs2, vs1, vl);
 }

-vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf4_m(vm, vs2, rs1, vl);
 }

-vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf2_m(vm, vs2, vs1, vl);
 }

-vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf2_m(vm, vs2, rs1, vl);
 }

-vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vand_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vand_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m1_m(vm, vs2, rs1, vl);
 }

-vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vand_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m2_m(vm, vs2, vs1, vl);
 }

-vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vand_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m2_m(vm, vs2, rs1, vl);
 }

-vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vand_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m4_m(vm, vs2, vs1, vl);
 }

-vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vand_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m4_m(vm, vs2, rs1, vl);
 }

-vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vand_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m8_m(vm, vs2, vs1, vl);
 }

-vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vand_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m8_m(vm, vs2, rs1, vl);
 }

-vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf4_m(vm, vs2, vs1, vl);
 }

-vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf4_m(vm, vs2, rs1, vl);
 }

-vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf2_m(vm, vs2, vs1, vl);
 }

-vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf2_m(vm, vs2, rs1, vl);
 }

-vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vand_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vand_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m1_m(vm, vs2, rs1, vl);
 }

-vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vand_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m2_m(vm, vs2, vs1, vl);
 }

-vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vand_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m2_m(vm, vs2, rs1, vl);
 }

-vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vand_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m4_m(vm, vs2, vs1, vl);
 }

-vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vand_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m4_m(vm, vs2, rs1, vl);
 }

-vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vand_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m8_m(vm, vs2, vs1, vl);
 }

-vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vand_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m8_m(vm, vs2, rs1, vl);
 }

-vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32mf2_m(vm, vs2, vs1, vl);
 }

-vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32mf2_m(vm, vs2, rs1, vl);
 }

-vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vand_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vand_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m1_m(vm, vs2, rs1, vl);
 }

-vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vand_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m2_m(vm, vs2, vs1, vl);
 }

-vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vand_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m2_m(vm, vs2, rs1, vl);
 }

-vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vand_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m4_m(vm, vs2, vs1, vl);
 }

-vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vand_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m4_m(vm, vs2, rs1, vl);
 }

-vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vand_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m8_m(vm, vs2, vs1, vl);
 }

-vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vand_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m8_m(vm, vs2, rs1, vl);
 }

-vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vand_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vand_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m1_m(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vand_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m2_m(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vand_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m2_m(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vand_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m4_m(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vand_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m4_m(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vand_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m8_m(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vand_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vand\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vasub.c b/auto-generated/gnu-api-tests/vasub.c
index fe1b181ca..99e48554e 100644
--- a/auto-generated/gnu-api-tests/vasub.c
+++ b/auto-generated/gnu-api-tests/vasub.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasub\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vasubu.c b/auto-generated/gnu-api-tests/vasubu.c
index ad7f9b645..177fbcc30 100644
--- a/auto-generated/gnu-api-tests/vasubu.c
+++ b/auto-generated/gnu-api-tests/vasubu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u16m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u16m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u16m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u16m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u16m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u16m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u16m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u16m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u32m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u32m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u32m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u32m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u32m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u32m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u32m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u32m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u64m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u64m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u64m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u64m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u64m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u64m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u64m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u64m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
__riscv_vasubu_vx_u16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, 
uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t 
test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasubu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vcompress.c b/auto-generated/gnu-api-tests/vcompress.c index 4d44436c7..55c2925a9 100644 --- a/auto-generated/gnu-api-tests/vcompress.c +++ b/auto-generated/gnu-api-tests/vcompress.c @@ -6,240 +6,240 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_f16mf4(src, mask, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16mf4(vs2, vs1, vl); } -vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src, vbool32_t mask, size_t vl) 
{ - return __riscv_vcompress_vm_f16mf2(src, mask, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16mf2(vs2, vs1, vl); } -vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m1(src, mask, vl); +vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m1(vs2, vs1, vl); } -vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m2(src, mask, vl); +vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m2(vs2, vs1, vl); } -vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m4(src, mask, vl); +vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m4(vs2, vs1, vl); } -vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m8(src, mask, vl); +vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m8(vs2, vs1, vl); } -vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_f32mf2(src, mask, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32mf2(vs2, vs1, vl); } -vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m1(src, mask, vl); +vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m1(vs2, vs1, vl); } -vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m2(src, mask, vl); +vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m2(vs2, vs1, vl); } -vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m4(src, mask, vl); +vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m4(vs2, vs1, vl); } -vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m8(src, mask, vl); +vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m8(vs2, vs1, vl); } -vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_f64m1(src, mask, vl); +vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m1(vs2, vs1, vl); } -vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_f64m2(src, mask, vl); +vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m2(vs2, vs1, vl); } -vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_f64m4(src, mask, vl); +vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m4(vs2, vs1, vl); } -vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, 
size_t vl) { - return __riscv_vcompress_vm_f64m8(src, mask, vl); +vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m8(vs2, vs1, vl); } -vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i8mf8(src, mask, vl); +vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8mf8(vs2, vs1, vl); } -vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i8mf4(src, mask, vl); +vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8mf4(vs2, vs1, vl); } -vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i8mf2(src, mask, vl); +vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8mf2(vs2, vs1, vl); } -vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m1(src, mask, vl); +vint8m1_t test_vcompress_vm_i8m1(vint8m1_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m1(vs2, vs1, vl); } -vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m2(src, mask, vl); +vint8m2_t test_vcompress_vm_i8m2(vint8m2_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m2(vs2, vs1, vl); } -vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m4(src, mask, vl); +vint8m4_t test_vcompress_vm_i8m4(vint8m4_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m4(vs2, vs1, vl); } -vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m8(src, mask, vl); +vint8m8_t test_vcompress_vm_i8m8(vint8m8_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m8(vs2, vs1, vl); } -vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i16mf4(src, mask, vl); +vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16mf4(vs2, vs1, vl); } -vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i16mf2(src, mask, vl); +vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16mf2(vs2, vs1, vl); } -vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m1(src, mask, vl); +vint16m1_t test_vcompress_vm_i16m1(vint16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16m1(vs2, vs1, vl); } -vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m2(src, mask, vl); +vint16m2_t test_vcompress_vm_i16m2(vint16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16m2(vs2, vs1, vl); } -vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m4(src, mask, vl); +vint16m4_t test_vcompress_vm_i16m4(vint16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16m4(vs2, vs1, vl); } -vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m8(src, mask, vl); +vint16m8_t test_vcompress_vm_i16m8(vint16m8_t vs2, 
vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16m8(vs2, vs1, vl); } -vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i32mf2(src, mask, vl); +vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32mf2(vs2, vs1, vl); } -vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m1(src, mask, vl); +vint32m1_t test_vcompress_vm_i32m1(vint32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m1(vs2, vs1, vl); } -vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m2(src, mask, vl); +vint32m2_t test_vcompress_vm_i32m2(vint32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m2(vs2, vs1, vl); } -vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m4(src, mask, vl); +vint32m4_t test_vcompress_vm_i32m4(vint32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m4(vs2, vs1, vl); } -vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m8(src, mask, vl); +vint32m8_t test_vcompress_vm_i32m8(vint32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m8(vs2, vs1, vl); } -vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m1(src, mask, vl); +vint64m1_t test_vcompress_vm_i64m1(vint64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m1(vs2, vs1, vl); } -vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m2(src, mask, vl); +vint64m2_t test_vcompress_vm_i64m2(vint64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m2(vs2, vs1, vl); } -vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m4(src, mask, vl); +vint64m4_t test_vcompress_vm_i64m4(vint64m4_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m4(vs2, vs1, vl); } -vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m8(src, mask, vl); +vint64m8_t test_vcompress_vm_i64m8(vint64m8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m8(vs2, vs1, vl); } -vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_u8mf8(src, mask, vl); +vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8mf8(vs2, vs1, vl); } -vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_u8mf4(src, mask, vl); +vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8mf4(vs2, vs1, vl); } -vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_u8mf2(src, mask, vl); +vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8mf2(vs2, vs1, vl); } -vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m1(src, mask, vl); +vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m1(vs2, vs1, vl); } 
-vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m2(src, mask, vl); +vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m2(vs2, vs1, vl); } -vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m4(src, mask, vl); +vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m4(vs2, vs1, vl); } -vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m8(src, mask, vl); +vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m8(vs2, vs1, vl); } -vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_u16mf4(src, mask, vl); +vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16mf4(vs2, vs1, vl); } -vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_u16mf2(src, mask, vl); +vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16mf2(vs2, vs1, vl); } -vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m1(src, mask, vl); +vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m1(vs2, vs1, vl); } -vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m2(src, mask, vl); +vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m2(vs2, vs1, vl); } -vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m4(src, mask, vl); +vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m4(vs2, vs1, vl); } -vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m8(src, mask, vl); +vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m8(vs2, vs1, vl); } -vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_u32mf2(src, mask, vl); +vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32mf2(vs2, vs1, vl); } -vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_u32m1(src, mask, vl); +vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32m1(vs2, vs1, vl); } -vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_u32m2(src, mask, vl); +vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32m2(vs2, vs1, vl); } -vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_u32m4(src, mask, vl); +vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32m4(vs2, vs1, vl); } -vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, 
vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_vm_u32m8(src, mask, vl);
+vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_u32m8(vs2, vs1, vl);
 }

-vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_vm_u64m1(src, mask, vl);
+vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_u64m1(vs2, vs1, vl);
 }

-vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_vm_u64m2(src, mask, vl);
+vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_u64m2(vs2, vs1, vl);
 }

-vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_vm_u64m4(src, mask, vl);
+vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_u64m4(vs2, vs1, vl);
 }

-vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_vm_u64m8(src, mask, vl);
+vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_vm_u64m8(vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcompress\.[ivxfswum.]+\s+} 59 } } */
diff --git a/auto-generated/gnu-api-tests/vcpop.c b/auto-generated/gnu-api-tests/vcpop.c
index 6f0b6670d..284fbe4a7 100644
--- a/auto-generated/gnu-api-tests/vcpop.c
+++ b/auto-generated/gnu-api-tests/vcpop.c
@@ -6,60 +6,60 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
-  return __riscv_vcpop_m_b1(op1, vl);
+unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b1(vs2, vl);
 }

-unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
-  return __riscv_vcpop_m_b2(op1, vl);
+unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b2(vs2, vl);
 }

-unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
-  return __riscv_vcpop_m_b4(op1, vl);
+unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b4(vs2, vl);
 }

-unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
-  return __riscv_vcpop_m_b8(op1, vl);
+unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b8(vs2, vl);
 }

-unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
-  return __riscv_vcpop_m_b16(op1, vl);
+unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b16(vs2, vl);
 }

-unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
-  return __riscv_vcpop_m_b32(op1, vl);
+unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b32(vs2, vl);
 }

-unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
-  return __riscv_vcpop_m_b64(op1, vl);
+unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b64(vs2, vl);
 }

-unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
-  return __riscv_vcpop_m_b1_m(mask, op1, vl);
+unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b1_m(vm, vs2, vl);
 }

-unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
-  return __riscv_vcpop_m_b2_m(mask, op1, vl);
+unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b2_m(vm, vs2, vl);
 }

-unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
-  return __riscv_vcpop_m_b4_m(mask, op1, vl);
+unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b4_m(vm, vs2, vl);
 }

-unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
-  return __riscv_vcpop_m_b8_m(mask, op1, vl);
+unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b8_m(vm, vs2, vl);
 }

-unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
-  return __riscv_vcpop_m_b16_m(mask, op1, vl);
+unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b16_m(vm, vs2, vl);
 }

-unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
-  return __riscv_vcpop_m_b32_m(mask, op1, vl);
+unsigned long test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b32_m(vm, vs2, vl);
 }

-unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
-  return __riscv_vcpop_m_b64_m(mask, op1, vl);
+unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) {
+  return __riscv_vcpop_m_b64_m(vm, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcpop\.[ivxfswum.]+\s+} 14 } } */
diff --git a/auto-generated/gnu-api-tests/vdiv.c b/auto-generated/gnu-api-tests/vdiv.c
index ee6efc3a7..171fda517 100644
--- a/auto-generated/gnu-api-tests/vdiv.c
+++ b/auto-generated/gnu-api-tests/vdiv.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf8(op1, op2, vl);
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf8(vs2, vs1, vl);
 }

-vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf8(op1, op2, vl);
+vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf8(vs2, rs1, vl);
 }

-vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf4(op1, op2, vl);
+vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf4(vs2, vs1, vl);
 }

-vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf4(op1, op2, vl);
+vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf4(vs2, rs1, vl);
 }

-vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf2(op1, op2, vl);
+vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf2(vs2, vs1, vl);
 }

-vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf2(op1, op2, vl);
+vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf2(vs2, rs1, vl);
 }

-vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m1(op1, op2, vl);
+vint8m1_t test_vdiv_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m1(vs2, vs1, vl);
 }

-vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m1(op1, op2, vl);
+vint8m1_t
test_vdiv_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m2(op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m2(op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m4(op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m4(op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m8(op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m8(op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m1(op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m1(op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m2(op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m2(op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t 
vl) { - return __riscv_vdiv_vv_i16m4(op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m4(op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m8(op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m8(op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m1(op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m1(op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m2(op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m2(op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m4(op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m4(op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m8(op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m8(op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m1(op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + 
return __riscv_vdiv_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_vx_i64m1(op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m2(op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_vx_i64m2(op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m4(op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_vx_i64m4(op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m8(op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_vx_i64m8(op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_vx_i64m8(vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vdiv_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vdiv_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vdiv_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t 
op2, size_t vl) { - return __riscv_vdiv_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t 
vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + 
return __riscv_vdiv_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t 
test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vdiv\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vdivu.c b/auto-generated/gnu-api-tests/vdivu.c
index fae165002..9b9d7d8f8 100644
--- a/auto-generated/gnu-api-tests/vdivu.c
+++ b/auto-generated/gnu-api-tests/vdivu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf8(vs2, vs1, vl);
 }

-vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf8(vs2, rs1, vl);
 }

-vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf4(vs2, vs1, vl);
 }

-vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf4(vs2, rs1, vl);
 }

-vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf2(vs2, vs1, vl);
 }

-vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf2(vs2, rs1, vl);
 }

-vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m1(op1, op2, vl);
+vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m1(vs2, vs1, vl);
 }

-vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m1(op1, op2, vl);
+vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m1(vs2, rs1, vl);
 }

-vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m2(op1, op2, vl);
+vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m2(vs2, vs1, vl);
 }

-vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m2(op1, op2, vl);
+vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m2(vs2, rs1, vl);
 }

-vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m4(op1, op2, vl);
+vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m4(vs2, vs1, vl);
 }

-vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m4(op1, op2, vl);
+vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t vs2,
uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m8(op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t 
vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m2(op1, op2, vl); +vuint64m2_t 
test_vdivu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, 
vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t 
test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + 
return __riscv_vdivu_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t vm, 
vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vdivu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vfabs.c b/auto-generated/gnu-api-tests/vfabs.c index 3615f4757..636fb594a 100644 --- a/auto-generated/gnu-api-tests/vfabs.c +++ b/auto-generated/gnu-api-tests/vfabs.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf4(op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf4(vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf2(op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf2(vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs_v_f16m1(op1, vl); +vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m1(vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs_v_f16m2(op1, vl); +vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m2(vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs_v_f16m4(op1, vl); +vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m4(vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs_v_f16m8(op1, vl); +vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m8(vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f32mf2(op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32mf2(vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs_v_f32m1(op1, vl); +vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m1(vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs_v_f32m2(op1, vl); +vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m2(vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs_v_f32m4(op1, vl); +vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m4(vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs_v_f32m8(op1, vl); +vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m8(vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs_v_f64m1(op1, vl); +vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m1(vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs_v_f64m2(op1, vl); +vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m2(vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs_v_f64m4(op1, vl); +vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m4(vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) { - return 
__riscv_vfabs_v_f64m8(op1, vl); +vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m8(vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf4_m(mask, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf4_m(vm, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf2_m(mask, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf2_m(vm, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs_v_f16m1_m(mask, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m1_m(vm, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs_v_f16m2_m(mask, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m2_m(vm, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs_v_f16m4_m(mask, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m4_m(vm, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m8_m(vm, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f32mf2_m(mask, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m4_m(vm, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m8_m(vm, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m1_m(vm, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m2_m(vm, vs2, vl); } -vfloat64m4_t 
test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m4_m(vm, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m8_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfabs\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vfadd.c b/auto-generated/gnu-api-tests/vfadd.c index dc04eb83b..1b1ae56bf 100644 --- a/auto-generated/gnu-api-tests/vfadd.c +++ b/auto-generated/gnu-api-tests/vfadd.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4(vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2(vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1(vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2(vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4(vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfadd_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8(vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1(op1, op2, vl); 
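For readers following these hunks, the rename is mechanical: `op1`/`op2` become `vs2`/`vs1` for vector operands (or `rs1` for the scalar operand of the `.vf`/`.vx` forms), mirroring the operand names in the RVV assembly syntax. A minimal strip-mined caller of the renamed `vfadd.vf` intrinsic might look like the sketch below; it is not part of the generated tests, and it assumes a compiler providing `<riscv_vector.h>` with the V extension enabled.

#include <stddef.h>
#include <riscv_vector.h>

/* Sketch: add the scalar s to each of the n doubles in a,
   strip-mining with vsetvl so any n is handled. */
void add_scalar_f64(double *a, double s, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e64m1(n - i);          /* elements this pass */
    vfloat64m1_t v = __riscv_vle64_v_f64m1(a + i, vl);
    v = __riscv_vfadd_vf_f64m1(v, s, vl);             /* vfadd.vf vd, vs2, rs1 */
    __riscv_vse64_v_f64m1(a + i, v, vl);
    i += vl;
  }
}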
+vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_m(vm, vs2, rs1, 
vl); } -vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_m(mask, op1, op2, vl); 
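The `_m` variants renamed in the surrounding hunks take the mask as their first argument (`vm`, formerly `mask`). Under the default tail/mask-agnostic policy of the plain `_m` form, masked-off result lanes are unspecified, so code that needs them preserved would reach for the `_mu` policy variants instead. A hedged sketch of conditional use follows, assuming the standard `__riscv_vmflt_vf_f32m2_b16` comparison intrinsic.

#include <stddef.h>
#include <riscv_vector.h>

/* Sketch: add bias only where v is negative.  Because the plain _m
   form is tail/mask agnostic, masked-off lanes of the result are not
   guaranteed to keep the old values of v. */
vfloat32m2_t add_bias_to_negatives(vfloat32m2_t v, float bias, size_t vl) {
  vbool16_t neg = __riscv_vmflt_vf_f32m2_b16(v, 0.0f, vl);  /* lanes with v < 0 */
  return __riscv_vfadd_vf_f32m2_m(neg, v, bias, vl);
}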
+vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t 
test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_m(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t 
test_vfadd_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t 
test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t 
test_vfadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f16mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f16m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f16m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f16m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f16m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f16m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f16m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f16m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfadd\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vfclass.c b/auto-generated/gnu-api-tests/vfclass.c
index dcece85ed..887b83e83 100644
--- a/auto-generated/gnu-api-tests/vfclass.c
+++ b/auto-generated/gnu-api-tests/vfclass.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16mf4(op1, vl);
+vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16mf4(vs2, vl);
 }
 
-vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16mf2(op1, vl);
+vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16mf2(vs2, vl);
 }
 
-vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m1(op1, vl);
+vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m1(vs2, vl);
 }
 
-vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m2(op1, vl);
+vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m2(vs2, vl);
 }
 
-vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m4(op1, vl);
+vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m4(vs2, vl);
 }
 
-vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m8(op1, vl);
+vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m8(vs2, vl);
 }
 
-vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32mf2(op1, vl);
+vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32mf2(vs2, vl);
 }
 
-vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m1(op1, vl);
+vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32m1(vs2, vl);
 }
 
-vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m2(op1, vl);
+vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32m2(vs2, vl);
 }
 
-vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m4(op1, vl);
+vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32m4(vs2, vl);
 }
 
-vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m8(op1, vl);
+vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32m8(vs2, vl);
 }
 
-vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m1(op1, vl);
+vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m1(vs2, vl);
 }
 
-vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m2(op1, vl);
+vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m2(vs2, vl);
 }
 
-vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m4(op1, vl);
+vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m4(vs2, vl);
 }
 
-vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m8(op1, vl);
+vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m8(vs2, vl);
 }
 
-vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16mf4_m(mask, op1, vl);
+vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16mf4_m(vm, vs2, vl);
 }
 
-vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16mf2_m(mask, op1, vl);
+vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16mf2_m(vm, vs2, vl);
 }
 
-vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m1_m(mask, op1, vl);
+vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m1_m(vm, vs2, vl);
 }
 
-vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m2_m(mask, op1, vl);
+vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m2_m(vm, vs2, vl);
 }
 
-vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m4_m(mask, op1, vl);
+vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m4_m(vm, vs2, vl);
 }
 
-vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfclass_v_u16m8_m(mask, op1, vl);
+vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u16m8_m(vm, vs2, vl);
 }
 
-vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32mf2_m(mask, op1, vl);
+vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32mf2_m(vm, vs2, vl);
 }
 
-vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m1_m(mask, op1, vl);
+vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32m1_m(vm, vs2, vl);
 }
 
-vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m2_m(mask, op1, vl);
+vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32m2_m(vm, vs2, vl);
 }
 
-vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m4_m(mask, op1, vl);
+vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u32m4_m(vm, vs2, vl);
 }
 
-vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfclass_v_u32m8_m(mask, op1, vl);
+vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
  + return __riscv_vfclass_v_u32m8_m(vm, vs2, vl);
 }
 
-vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m1_m(mask, op1, vl);
+vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m1_m(vm, vs2, vl);
 }
 
-vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m2_m(mask, op1, vl);
+vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m2_m(vm, vs2, vl);
 }
 
-vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m4_m(mask, op1, vl);
+vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m4_m(vm, vs2, vl);
 }
 
-vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfclass_v_u64m8_m(mask, op1, vl);
+vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfclass_v_u64m8_m(vm, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfclass\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vfcvt.c b/auto-generated/gnu-api-tests/vfcvt.c
index 892d77896..2c1643b95 100644
--- a/auto-generated/gnu-api-tests/vfcvt.c
+++ b/auto-generated/gnu-api-tests/vfcvt.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4(src, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4(vs2, vl);
 }
 
-vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2(src, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2(vs2, vl);
 }
 
-vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1(src, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1(vs2, vl);
 }
 
-vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2(src, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2(vs2, vl);
 }
 
-vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4(src, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4(vs2, vl);
 }
 
-vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8(src, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8(vs2, vl);
 }
 
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf4(src, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf4(vs2, vl);
 }
 
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf2(src, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf2(vs2, vl);
 }
 
-vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1(src, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m1(vs2, vl);
 }
 
-vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m2(src, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m2(vs2, vl);
 }
 
-vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m4(src, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m4(vs2, vl);
 }
 
-vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m8(src, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m8(vs2, vl);
 }
 
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf4(src, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf4(vs2, vl);
 }
 
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf2(src, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf2(vs2, vl);
 }
 
-vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m1(src, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m1(vs2, vl);
 }
 
-vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m2(src, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m2(vs2, vl);
 }
 
-vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m4(src, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m4(vs2, vl);
 }
 
-vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m8(src, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m8(vs2, vl);
 }
 
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf4(src, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf4(vs2, vl);
 }
 
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf2(src, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf2(vs2, vl);
 }
 
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m1(src, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m1(vs2, vl);
 }
 
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m2(src, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m2(vs2, vl);
 }
 
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m4(src, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m4(vs2, vl);
 }
 
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m8(src, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m8(vs2, vl);
 }
 
-vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32mf2(src, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32mf2(vs2, vl);
 }
 
-vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m1(src, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m1(vs2, vl);
 }
 
-vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m2(src, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m2(vs2, vl);
 }
 
-vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m4(src, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m4(vs2, vl);
 }
 
-vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m8(src, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m8(vs2, vl);
 }
 
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32mf2(src, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32mf2(vs2, vl);
 }
 
-vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m1(src, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m1(vs2, vl);
 }
 
-vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m2(src, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m2(vs2, vl);
 }
 
-vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m4(src, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m4(vs2, vl);
 }
 
-vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m8(src, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m8(vs2, vl);
 }
 
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32mf2(src, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32mf2(vs2, vl);
 }
 
-vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m1(src, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m1(vs2, vl);
 }
 
-vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m2(src, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m2(vs2, vl);
 }
 
-vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m4(src, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m4(vs2, vl);
 }
 
-vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m8(src, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m8(vs2, vl);
 }
 
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32mf2(src, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32mf2(vs2, vl);
 }
 
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m1(src, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m1(vs2, vl);
 }
 
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m2(src, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m2(vs2, vl);
 }
 
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m4(src, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m4(vs2, vl);
 }
 
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m8(src, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m8(vs2, vl);
 }
 
-vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m1(src, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m1(vs2, vl);
 }
 
-vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m2(src, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2(vs2, vl);
 }
 
-vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4(src, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4(vs2, vl);
 }
 
-vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8(src, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8(vs2, vl);
 }
 
-vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1(src, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1(vs2, vl);
 }
 
-vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2(src, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2(vs2, vl);
 }
 
-vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4(src, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4(vs2, vl);
 }
 
-vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8(src, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m8(vs2, vl);
 }
 
-vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1(src, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1(vs2, vl);
 }
 
-vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2(src, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2(vs2, vl);
 }
 
-vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4(src, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4(vs2, vl);
 }
 
-vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8(src, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8(vs2, vl);
 }
 
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1(src, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1(vs2, vl);
 }
 
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2(src, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2(vs2, vl);
 }
 
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4(src, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4(vs2, vl);
 }
 
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8(src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8(vs2, vl);
 }
 
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4_m(mask, src, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4_m(vm, vs2, vl);
 }
 
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2_m(mask, src, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2_m(vm, vs2, vl);
 }
 
-vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1_m(mask, src, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1_m(vm, vs2, vl);
 }
 
-vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2_m(mask, src, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2_m(vm, vs2, vl);
 }
 
-vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4_m(mask, src, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4_m(vm, vs2, vl);
 }
 
-vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8_m(mask, src, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8_m(vm, vs2, vl);
 }
 
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf4_m(mask, src, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf4_m(vm, vs2, vl);
 }
 
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf2_m(mask, src, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf2_m(vm, vs2, vl);
 }
 
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1_m(mask, src, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m1_m(vm, vs2, vl);
 }
 
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m2_m(mask, src, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m2_m(vm, vs2, vl);
 }
 
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m4_m(mask, src, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m4_m(vm, vs2, vl);
 }
 
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m8_m(mask, src, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m8_m(vm, vs2, vl);
 }
 
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf4_m(mask, src, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf4_m(vm, vs2, vl);
 }
 
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf2_m(mask, src, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf2_m(vm, vs2, vl);
 }
 
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m1_m(mask, src, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m1_m(vm, vs2, vl);
 }
 
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m2_m(mask, src, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m2_m(vm, vs2, vl);
 }
 
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m4_m(mask, src, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t vm, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m4_m(vm, vs2, vl);
 }
 
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m8_m(mask, src, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t vm, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m8_m(vm, vs2, vl);
 }
 
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf4_m(mask, src, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf4_m(vm, vs2, vl);
 }
 
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf2_m(mask, src, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf2_m(vm, vs2, vl);
 }
 
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m1_m(mask, src, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m1_m(vm, vs2, vl);
 }
 
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m2_m(mask, src, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m2_m(vm, vs2, vl);
 }
 
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m4_m(mask, src, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m4_m(vm, vs2, vl);
 }
 
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m8_m(mask, src, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m8_m(vm, vs2, vl);
 }
 
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32mf2_m(mask, src, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32mf2_m(vm, vs2, vl);
 }
 
-vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m1_m(mask, src, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m1_m(vm, vs2, vl);
 }
 
-vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m2_m(mask, src, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m2_m(vm, vs2, vl);
 }
 
-vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m4_m(mask, src, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m4_m(vm, vs2, vl);
 }
 
-vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m8_m(mask, src, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m8_m(vm, vs2, vl);
 }
 
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32mf2_m(mask, src, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32mf2_m(vm, vs2, vl);
 }
 
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m1_m(mask, src, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m1_m(vm, vs2, vl);
 }
 
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m2_m(mask, src, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m2_m(vm, vs2, vl);
 }
 
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m4_m(mask, src, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m4_m(vm, vs2, vl);
 }
 
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m8_m(mask, src, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m8_m(vm, vs2, vl);
 }
 
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32mf2_m(mask, src, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32mf2_m(vm, vs2, vl);
 }
 
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m1_m(mask, src, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m1_m(vm, vs2, vl);
 }
 
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m2_m(mask, src, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m2_m(vm, vs2, vl);
 }
 
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m4_m(mask, src, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m4_m(vm, vs2, vl);
 }
 
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m8_m(mask, src, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m8_m(vm, vs2, vl);
 }
 
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32mf2_m(mask, src, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32mf2_m(vm, vs2, vl);
 }
 
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m1_m(mask, src, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m1_m(vm, vs2, vl);
 }
 
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m2_m(mask, src, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m2_m(vm, vs2, vl);
 }
 
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m4_m(mask, src, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m4_m(vm, vs2, vl);
 }
 
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m8_m(mask, src, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m8_m(vm, vs2, vl);
 }
 
-vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m1_m(mask, src, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m1_m(vm, vs2, vl);
 }
 
-vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m2_m(mask, src, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2_m(vm, vs2, vl);
 }
 
-vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4_m(mask, src, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4_m(vm, vs2, vl);
 }
 
-vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8_m(mask, src, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8_m(vm, vs2, vl);
 }
 
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1_m(mask, src, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1_m(vm, vs2, vl);
 }
 
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2_m(mask, src, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2_m(vm, vs2, vl);
 }
 
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4_m(mask, src, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4_m(vm, vs2, vl);
 }
 
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8_m(mask, src, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m8_m(vm, vs2, vl);
 }
 
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1_m(mask, src, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1_m(vm, vs2, vl);
 }
 
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2_m(mask, src, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2_m(vm, vs2, vl);
 }
 
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4_m(mask, src, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4_m(vm, vs2, vl);
 }
 
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8_m(mask, src, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8_m(vm, vs2, vl);
 }
 
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1_m(mask, src, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1_m(vm, vs2, vl);
 }
 
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2_m(mask, src, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_m(vm, vs2, vl);
 }
 
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4_m(mask, src, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4_m(vm, vs2, vl);
 }
 
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8_m(mask, src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8_m(vm, vs2, vl);
 }
 
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16m1_t test_vfcvt_x_f_v_i16m1_rm(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1_rm(src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16m2_t test_vfcvt_x_f_v_i16m2_rm(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2_rm(src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16m4_t test_vfcvt_x_f_v_i16m4_rm(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4_rm(src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16m8_t test_vfcvt_x_f_v_i16m8_rm(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8_rm(src, __RISCV_FRM_RNE, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1_rm(src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m2_rm(src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m4_rm(src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m8_rm(src, __RISCV_FRM_RNE, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm(vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm(vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm(vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm(vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm(vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm(vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm(vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm(vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm(vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m8_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm(vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm(vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm(vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm(vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm(vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm(vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm(vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m8_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm(vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32m1_t test_vfcvt_x_f_v_i32m1_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m1_rm(src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32m2_t test_vfcvt_x_f_v_i32m2_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m2_rm(src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32m4_t test_vfcvt_x_f_v_i32m4_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m4_rm(src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32m8_t test_vfcvt_x_f_v_i32m8_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m8_rm(src, __RISCV_FRM_RNE, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m1_rm(src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m2_rm(src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m4_rm(src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m8_rm(src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm(vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm(vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm(vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm(vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm(vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm(vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm(vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm(vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m8_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm(vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm(vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm(vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm(vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm(vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm(vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m8_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint64m1_t test_vfcvt_x_f_v_i64m1_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m1_rm(src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint64m2_t test_vfcvt_x_f_v_i64m2_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m2_rm(src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint64m4_t test_vfcvt_x_f_v_i64m4_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4_rm(src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint64m8_t test_vfcvt_x_f_v_i64m8_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8_rm(src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1_rm(src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2_rm(src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4_rm(src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8_rm(src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm(vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm(vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm(vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm(vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm(vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm(vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm(vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm(vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm(vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm(vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm(vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm(vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8_rm(src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_m(vbool16_t mask, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_m(vbool8_t mask, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_m(vbool4_t mask, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_m(vbool4_t vm, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_m(vbool2_t mask, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_m(vbool2_t vm, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_m(vbool2_t mask, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfcvt_x_f_v_i32m1_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfcvt_x_f_v_i32m2_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_m(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_m(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_m(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_m(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_m(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_m(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_m(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_m(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/gnu-api-tests/vfcvt_rtz.c b/auto-generated/gnu-api-tests/vfcvt_rtz.c
index f1a33ea6e..fed406672 100644
--- a/auto-generated/gnu-api-tests/vfcvt_rtz.c
+++ b/auto-generated/gnu-api-tests/vfcvt_rtz.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf4(src, vl);
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16mf4(vs2, vl);
 }
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf2(src, vl);
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16mf2(vs2, vl);
 }
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m1(src, vl);
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m1(vs2, vl);
 }
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m2(src, vl);
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m2(vs2, vl);
 }
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m4(src, vl);
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m4(vs2, vl);
 }
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m8(src, vl);
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m8(vs2, vl);
 }
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf4(src, vl);
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16mf4(vs2, vl);
 }
-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf2(src, vl);
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16mf2(vs2, vl);
 }
-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m1(src, vl);
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m1(vs2, vl);
 }
-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m2(src, vl);
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m2(vs2, vl);
 }
-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m4(src, vl);
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m4(vs2, vl);
 }
-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m8(src, vl);
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m8(vs2, vl);
 }
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32mf2(src, vl);
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32mf2(vs2, vl);
 }
-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m1(src, vl);
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m1(vs2, vl);
 }
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m2(src, vl);
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m2(vs2, vl);
 }
-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m4(src, vl);
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m4(vs2, vl);
 }
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m8(src, vl);
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m8(vs2, vl);
 }
-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32mf2(src, vl);
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32mf2(vs2, vl);
 }
-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m1(src, vl);
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m1(vs2, vl);
 }
-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m2(src, vl);
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m2(vs2, vl);
 }
-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m4(src, vl);
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m4(vs2, vl);
 }
-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m8(src, vl);
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m8(vs2, vl);
 }
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m1(src, vl);
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m1(vs2, vl);
 }
-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m2(src, vl);
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m2(vs2, vl);
 }
-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m4(src, vl);
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m4(vs2, vl);
 }
-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m8(src, vl);
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m8(vs2, vl);
 }
-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m1(src, vl);
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m1(vs2, vl);
 }
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m2(src, vl);
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m2(vs2, vl);
 }
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m4(src, vl);
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m4(vs2, vl);
 }
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m8(src, vl);
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m8(vs2, vl);
 }
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf4_m(mask, src, vl);
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16mf4_m(vm, vs2, vl);
 }
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf2_m(mask, src, vl);
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16mf2_m(vm, vs2, vl);
 }
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m1_m(mask, src, vl);
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m1_m(vm, vs2, vl);
 }
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m2_m(mask, src, vl);
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m2_m(vm, vs2, vl);
 }
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m4_m(mask, src, vl);
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m4_m(vm, vs2, vl);
 }
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m8_m(mask, src, vl);
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16m8_m(vm, vs2, vl);
 }
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf4_m(mask, src, vl);
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16mf4_m(vm, vs2, vl);
 }
-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf2_m(mask, src, vl);
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16mf2_m(vm, vs2, vl);
 }
-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m1_m(mask, src, vl);
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m1_m(vm, vs2, vl);
 }
-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m2_m(mask, src, vl);
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m2_m(vm, vs2, vl);
 }
-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m4_m(mask, src, vl);
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m4_m(vm, vs2, vl);
 }
-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m8_m(mask, src, vl);
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u16m8_m(vm, vs2, vl);
 }
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32mf2_m(mask, src, vl);
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32mf2_m(vm, vs2, vl);
 }
-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m1_m(mask, src, vl);
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
  return __riscv_vfcvt_rtz_x_f_v_i32m1_m(vm, vs2, vl);
 }
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m2_m(mask, src, vl);
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m2_m(vm, vs2, vl);
 }
-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m4_m(mask, src, vl);
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i32m4_m(vm, vs2, vl);
 }
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m8_m(mask, src, vl);
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
  return __riscv_vfcvt_rtz_x_f_v_i32m8_m(vm, vs2, vl);
 }
-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl);
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32mf2_m(vm, vs2, vl);
 }
-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m1_m(mask, src, vl);
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m1_m(vm, vs2, vl);
 }
-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m2_m(mask, src, vl);
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m2_m(vm, vs2, vl);
 }
-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m4_m(mask, src, vl);
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m4_m(vm, vs2, vl);
 }
-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m8_m(mask, src, vl);
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u32m8_m(vm, vs2, vl);
 }
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m1_m(mask, src, vl);
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m1_m(vm, vs2, vl);
 }
-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m2_m(mask, src, vl);
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m2_m(vm, vs2, vl);
 }
-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m4_m(mask, src, vl);
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m4_m(vm, vs2, vl);
 }
-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m8_m(mask, src, vl);
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i64m8_m(vm, vs2, vl);
 }
-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m1_m(mask, src, vl);
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m1_m(vm, vs2, vl);
 }
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m2_m(mask, src, vl);
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m2_m(vm, vs2, vl);
 }
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m4_m(mask, src, vl);
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
  return __riscv_vfcvt_rtz_xu_f_v_u64m4_m(vm, vs2, vl);
 }
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m8_m(mask, src, vl);
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m8_m(vm, vs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.rtz[ivxfswum.]*\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vfdiv.c b/auto-generated/gnu-api-tests/vfdiv.c
index 6fa5a3353..11aa54839 100644
--- a/auto-generated/gnu-api-tests/vfdiv.c
+++ b/auto-generated/gnu-api-tests/vfdiv.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf4(vs2, vs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf4(vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf2(vs2, vs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf2(vs2, rs1, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m1(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m1(vs2, rs1, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m2(vs2, vs1, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m2(vs2, rs1, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m4(vs2, vs1, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m4(vs2, rs1, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m8(vs2, vs1, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m8(vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32mf2(vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32mf2(vs2, rs1, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m1(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m1(vs2, rs1, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m2(vs2, vs1, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m2(vs2, rs1, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m4(vs2, vs1, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m4(vs2, rs1, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m8(vs2, vs1, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m8(vs2, rs1, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m1(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m1(vs2, rs1, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m2(vs2, vs1, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m2(vs2, rs1, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m4(vs2, vs1, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m4(vs2, rs1, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m8(vs2, vs1, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m8(vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf4_m(mask, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf4_m(vm, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf4_m(mask, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf4_m(vm, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf2_m(mask, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf2_m(vm, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf2_m(mask, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf2_m(vm, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m1_m(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m1_m(vm, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m2_m(mask, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m2_m(vm, vs2, vs1, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m2_m(mask, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m2_m(vm, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m4_m(mask, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m4_m(vm, vs2, vs1, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m4_m(mask, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m4_m(vm, vs2, rs1, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m8_m(mask, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m8_m(vm, vs2, vs1, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m8_m(mask, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m8_m(vm, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32mf2_m(vm, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32mf2_m(vm, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m1_m(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m1_m(vm, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m2_m(vm, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m2_m(vm, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m4_m(vm, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m4_m(vm, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m8_m(vm, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m8_m(vm, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m1_m(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m1_m(vm, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m2_m(vm, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m2_m(vm, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m4_m(vm, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m4_m(vm, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m8_m(vm, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m8_m(vm, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m8_rm_m(mask, op1, op2,
__RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfdiv\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vfirst.c b/auto-generated/gnu-api-tests/vfirst.c index f7657fd10..6ba288af1 100644 --- a/auto-generated/gnu-api-tests/vfirst.c +++ b/auto-generated/gnu-api-tests/vfirst.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -long test_vfirst_m_b1(vbool1_t op1, size_t vl) { - return 
__riscv_vfirst_m_b1(op1, vl); +long test_vfirst_m_b1(vbool1_t vs2, size_t vl) { + return __riscv_vfirst_m_b1(vs2, vl); } -long test_vfirst_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vfirst_m_b2(op1, vl); +long test_vfirst_m_b2(vbool2_t vs2, size_t vl) { + return __riscv_vfirst_m_b2(vs2, vl); } -long test_vfirst_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vfirst_m_b4(op1, vl); +long test_vfirst_m_b4(vbool4_t vs2, size_t vl) { + return __riscv_vfirst_m_b4(vs2, vl); } -long test_vfirst_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vfirst_m_b8(op1, vl); +long test_vfirst_m_b8(vbool8_t vs2, size_t vl) { + return __riscv_vfirst_m_b8(vs2, vl); } -long test_vfirst_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vfirst_m_b16(op1, vl); +long test_vfirst_m_b16(vbool16_t vs2, size_t vl) { + return __riscv_vfirst_m_b16(vs2, vl); } -long test_vfirst_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vfirst_m_b32(op1, vl); +long test_vfirst_m_b32(vbool32_t vs2, size_t vl) { + return __riscv_vfirst_m_b32(vs2, vl); } -long test_vfirst_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vfirst_m_b64(op1, vl); +long test_vfirst_m_b64(vbool64_t vs2, size_t vl) { + return __riscv_vfirst_m_b64(vs2, vl); } -long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vfirst_m_b1_m(mask, op1, vl); +long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { + return __riscv_vfirst_m_b1_m(vm, vs2, vl); } -long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vfirst_m_b2_m(mask, op1, vl); +long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { + return __riscv_vfirst_m_b2_m(vm, vs2, vl); } -long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vfirst_m_b4_m(mask, op1, vl); +long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_vfirst_m_b4_m(vm, vs2, vl); } -long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_vfirst_m_b8_m(mask, op1, vl); +long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_vfirst_m_b8_m(vm, vs2, vl); } -long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_vfirst_m_b16_m(mask, op1, vl); +long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_vfirst_m_b16_m(vm, vs2, vl); } -long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_vfirst_m_b32_m(mask, op1, vl); +long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_vfirst_m_b32_m(vm, vs2, vl); } -long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_vfirst_m_b64_m(mask, op1, vl); +long test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_vfirst_m_b64_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfirst\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-api-tests/vfmacc.c b/auto-generated/gnu-api-tests/vfmacc.c index f86ded6d6..8035765ad 100644 --- a/auto-generated/gnu-api-tests/vfmacc.c +++ b/auto-generated/gnu-api-tests/vfmacc.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v return __riscv_vfmacc_vf_f64m8(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); +vfloat16mf4_t 
test_vfmacc_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16mf4_m(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16mf4_m(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16mf2_m(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16mf2_m(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m1_m(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m1_m(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m2_m(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); 
+vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); 
+vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m1_m(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmacc_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfmacc_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, 
vfloat64m8_ return __riscv_vfmacc_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t 
test_vfmacc_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f16m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return 
__riscv_vfmacc_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, 
vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmacc\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vfmadd.c b/auto-generated/gnu-api-tests/vfmadd.c index 3e3e69898..c6fba54e0 100644 --- a/auto-generated/gnu-api-tests/vfmadd.c +++ b/auto-generated/gnu-api-tests/vfmadd.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v return __riscv_vfmadd_vf_f64m8(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf4_m(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf4_m(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf2_m(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf2_m(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, 
vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m1_m(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m1_m(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m2_m(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t 
vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_m(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t 
vl) { - return __riscv_vfmadd_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmadd_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfmadd_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmadd_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t 
test_vfmadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_rm_m(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { 
+ return __riscv_vfmadd_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, 
vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmadd\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vfmax.c b/auto-generated/gnu-api-tests/vfmax.c index a1630830e..c840c9c54 100644 --- a/auto-generated/gnu-api-tests/vfmax.c +++ b/auto-generated/gnu-api-tests/vfmax.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf4(vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf2(vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m1(vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m2(vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m4(vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m8(vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfmax_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m2(vs2, vs1, vl); } 
-vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, 
float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, 
size_t vl) { + return __riscv_vfmax_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmax\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfmerge.c b/auto-generated/gnu-api-tests/vfmerge.c index c00021b97..b96456cec 100644 --- a/auto-generated/gnu-api-tests/vfmerge.c +++ b/auto-generated/gnu-api-tests/vfmerge.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, float16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16mf4(op1, op2, mask, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t vs2, float16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16mf4(vs2, rs1, v0, vl); } -vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, float16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16mf2(op1, op2, mask, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t vs2, float16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16mf2(vs2, rs1, v0, vl); } -vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, float16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m1(op1, op2, mask, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t vs2, float16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m1(vs2, rs1, v0, vl); } -vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, float16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m2(op1, op2, mask, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t vs2, float16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m2(vs2, rs1, v0, vl); } -vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, float16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m4(op1, op2, mask, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t vs2, float16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m4(vs2, rs1, v0, vl); } -vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, float16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m8(op1, op2, mask, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t vs2, float16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m8(vs2, rs1, v0, vl); } -vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float32_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32mf2(op1, op2, mask, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t vs2, float32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32mf2(vs2, rs1, v0, vl); } -vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float32_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m1(op1, op2, mask, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t vs2, float32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m1(vs2, rs1, v0, vl); } -vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float32_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m2(op1, op2, mask, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t vs2, float32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m2(vs2, rs1, v0, vl); } -vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float32_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m4(op1, op2, mask, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t vs2, float32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m4(vs2, rs1, v0, vl); } -vfloat32m8_t 
test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float32_t op2, vbool4_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m8(op1, op2, mask, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t vs2, float32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m8(vs2, rs1, v0, vl); } -vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, float64_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m1(op1, op2, mask, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t vs2, float64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m1(vs2, rs1, v0, vl); } -vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, float64_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m2(op1, op2, mask, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t vs2, float64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m2(vs2, rs1, v0, vl); } -vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, float64_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m4(op1, op2, mask, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t vs2, float64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m4(vs2, rs1, v0, vl); } -vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, float64_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m8(op1, op2, mask, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t vs2, float64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m8(vs2, rs1, v0, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmerge\.[ivxfswum.]+\s+} 15 } } */ diff --git a/auto-generated/gnu-api-tests/vfmin.c b/auto-generated/gnu-api-tests/vfmin.c index 5453b4768..2bd473740 100644 --- a/auto-generated/gnu-api-tests/vfmin.c +++ b/auto-generated/gnu-api-tests/vfmin.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf4(vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf2(vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m1(vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfmin_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m2(vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m4(vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m8(vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m4(op1, op2, vl); 
+vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t 
test_vfmin_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t 
test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t 
test_vfmin_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmin\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfmsac.c b/auto-generated/gnu-api-tests/vfmsac.c index 020f35d11..43adcf4a4 100644 --- a/auto-generated/gnu-api-tests/vfmsac.c +++ b/auto-generated/gnu-api-tests/vfmsac.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v return __riscv_vfmsac_vf_f64m8(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_m(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_m(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_m(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf2_m(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); +vfloat16m1_t 
test_vfmsac_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_m(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_m(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_m(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t 
test_vfmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m1_m(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t 
vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmsac_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfmsac_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmsac_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return 
__riscv_vfmsac_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, 
vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m8_rm_m(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsac\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vfmsub.c b/auto-generated/gnu-api-tests/vfmsub.c index 2aed07d83..897238e76 100644 --- a/auto-generated/gnu-api-tests/vfmsub.c +++ b/auto-generated/gnu-api-tests/vfmsub.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v return __riscv_vfmsub_vf_f64m8(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16mf4_m(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16mf4_m(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16mf2_m(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16mf2_m(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m1_m(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m1_m(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m2_m(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return 
__riscv_vfmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return 
__riscv_vfmsub_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m1_m(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m8_m(mask, 
vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmsub_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfmsub_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmsub_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, 
vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f16m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t 
test_vfmsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m2_rm_m(mask, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsub\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vfmul.c b/auto-generated/gnu-api-tests/vfmul.c index 26665e943..a6052cdb5 100644 --- a/auto-generated/gnu-api-tests/vfmul.c +++ b/auto-generated/gnu-api-tests/vfmul.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4(vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2(vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, 
size_t vl) { - return __riscv_vfmul_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1(vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2(vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4(vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8(vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return 
__riscv_vfmul_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4_m(mask, op1, 
op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8_m(vm, vs2, rs1, vl); 
} -vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m1_m(mask, op1, op2, vl); 
+vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m8_m(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } 
-vfloat16m1_t test_vfmul_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t 
test_vfmul_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm(vfloat64m8_t op1, 
vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return 
__riscv_vfmul_vv_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return 
+vfloat32m4_t test_vfmul_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfmul_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfmul_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfmul_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfmul_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfmul_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfmul_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfmul_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfmul_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfmul_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfmul_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfmul_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmul\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vfmv.c b/auto-generated/gnu-api-tests/vfmv.c
index e7fc89f1d..ad5da4f41 100644
--- a/auto-generated/gnu-api-tests/vfmv.c
+++ b/auto-generated/gnu-api-tests/vfmv.c
@@ -6,184 +6,184 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfmv_v_f_f16mf4(float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16mf4(src, vl);
+vfloat16mf4_t test_vfmv_v_f_f16mf4(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16mf4(rs1, vl);
 }
 
-vfloat16mf2_t test_vfmv_v_f_f16mf2(float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16mf2(src, vl);
+vfloat16mf2_t test_vfmv_v_f_f16mf2(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16mf2(rs1, vl);
 }
 
-vfloat16m1_t test_vfmv_v_f_f16m1(float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m1(src, vl);
+vfloat16m1_t test_vfmv_v_f_f16m1(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m1(rs1, vl);
 }
 
-vfloat16m2_t test_vfmv_v_f_f16m2(float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m2(src, vl);
+vfloat16m2_t test_vfmv_v_f_f16m2(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m2(rs1, vl);
 }
 
-vfloat16m4_t test_vfmv_v_f_f16m4(float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m4(src, vl);
+vfloat16m4_t test_vfmv_v_f_f16m4(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m4(rs1, vl);
 }
 
-vfloat16m8_t test_vfmv_v_f_f16m8(float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m8(src, vl);
+vfloat16m8_t test_vfmv_v_f_f16m8(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m8(rs1, vl);
 }
 
-vfloat32mf2_t test_vfmv_v_f_f32mf2(float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32mf2(src, vl);
+vfloat32mf2_t test_vfmv_v_f_f32mf2(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32mf2(rs1, vl);
 }
 
-vfloat32m1_t test_vfmv_v_f_f32m1(float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m1(src, vl);
+vfloat32m1_t test_vfmv_v_f_f32m1(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m1(rs1, vl);
 }
 
-vfloat32m2_t test_vfmv_v_f_f32m2(float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m2(src, vl);
+vfloat32m2_t test_vfmv_v_f_f32m2(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m2(rs1, vl);
 }
 
-vfloat32m4_t test_vfmv_v_f_f32m4(float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m4(src, vl);
+vfloat32m4_t test_vfmv_v_f_f32m4(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m4(rs1, vl);
 }
 
-vfloat32m8_t test_vfmv_v_f_f32m8(float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m8(src, vl);
+vfloat32m8_t test_vfmv_v_f_f32m8(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m8(rs1, vl);
 }
 
-vfloat64m1_t test_vfmv_v_f_f64m1(float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m1(src, vl);
+vfloat64m1_t test_vfmv_v_f_f64m1(float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m1(rs1, vl);
 }
 
-vfloat64m2_t test_vfmv_v_f_f64m2(float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m2(src, vl);
+vfloat64m2_t test_vfmv_v_f_f64m2(float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m2(rs1, vl);
 }
 
-vfloat64m4_t test_vfmv_v_f_f64m4(float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m4(src, vl);
+vfloat64m4_t test_vfmv_v_f_f64m4(float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m4(rs1, vl);
 }
 
-vfloat64m8_t test_vfmv_v_f_f64m8(float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m8(src, vl);
+vfloat64m8_t test_vfmv_v_f_f64m8(float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m8(rs1, vl);
 }
 
-float16_t test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) {
-  return __riscv_vfmv_f_s_f16mf4_f16(src);
+float16_t test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t vs1) {
+  return __riscv_vfmv_f_s_f16mf4_f16(vs1);
 }
 
-vfloat16mf4_t test_vfmv_s_f_f16mf4(float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16mf4(src, vl);
+vfloat16mf4_t test_vfmv_s_f_f16mf4(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16mf4(rs1, vl);
 }
 
-float16_t test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) {
-  return __riscv_vfmv_f_s_f16mf2_f16(src);
+float16_t test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t vs1) {
+  return __riscv_vfmv_f_s_f16mf2_f16(vs1);
 }
 
-vfloat16mf2_t test_vfmv_s_f_f16mf2(float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16mf2(src, vl);
+vfloat16mf2_t test_vfmv_s_f_f16mf2(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16mf2(rs1, vl);
 }
 
-float16_t test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) {
-  return __riscv_vfmv_f_s_f16m1_f16(src);
+float16_t test_vfmv_f_s_f16m1_f16(vfloat16m1_t vs1) {
+  return __riscv_vfmv_f_s_f16m1_f16(vs1);
 }
 
-vfloat16m1_t test_vfmv_s_f_f16m1(float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m1(src, vl);
+vfloat16m1_t test_vfmv_s_f_f16m1(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m1(rs1, vl);
 }
 
-float16_t test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) {
-  return __riscv_vfmv_f_s_f16m2_f16(src);
+float16_t test_vfmv_f_s_f16m2_f16(vfloat16m2_t vs1) {
+  return __riscv_vfmv_f_s_f16m2_f16(vs1);
 }
 
-vfloat16m2_t test_vfmv_s_f_f16m2(float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m2(src, vl);
+vfloat16m2_t test_vfmv_s_f_f16m2(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m2(rs1, vl);
 }
 
-float16_t test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) {
-  return __riscv_vfmv_f_s_f16m4_f16(src);
+float16_t test_vfmv_f_s_f16m4_f16(vfloat16m4_t vs1) {
+  return __riscv_vfmv_f_s_f16m4_f16(vs1);
 }
 
-vfloat16m4_t test_vfmv_s_f_f16m4(float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m4(src, vl);
+vfloat16m4_t test_vfmv_s_f_f16m4(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m4(rs1, vl);
 }
 
-float16_t test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) {
-  return __riscv_vfmv_f_s_f16m8_f16(src);
+float16_t test_vfmv_f_s_f16m8_f16(vfloat16m8_t vs1) {
+  return __riscv_vfmv_f_s_f16m8_f16(vs1);
 }
 
-vfloat16m8_t test_vfmv_s_f_f16m8(float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m8(src, vl);
+vfloat16m8_t test_vfmv_s_f_f16m8(float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m8(rs1, vl);
 }
 
-float32_t test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) {
-  return __riscv_vfmv_f_s_f32mf2_f32(src);
+float32_t test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t vs1) {
+  return __riscv_vfmv_f_s_f32mf2_f32(vs1);
 }
 
-vfloat32mf2_t test_vfmv_s_f_f32mf2(float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32mf2(src, vl);
+vfloat32mf2_t test_vfmv_s_f_f32mf2(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32mf2(rs1, vl);
 }
 
-float32_t test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) {
-  return __riscv_vfmv_f_s_f32m1_f32(src);
+float32_t test_vfmv_f_s_f32m1_f32(vfloat32m1_t vs1) {
+  return __riscv_vfmv_f_s_f32m1_f32(vs1);
 }
 
-vfloat32m1_t test_vfmv_s_f_f32m1(float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m1(src, vl);
+vfloat32m1_t test_vfmv_s_f_f32m1(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m1(rs1, vl);
 }
 
-float32_t test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) {
-  return __riscv_vfmv_f_s_f32m2_f32(src);
+float32_t test_vfmv_f_s_f32m2_f32(vfloat32m2_t vs1) {
+  return __riscv_vfmv_f_s_f32m2_f32(vs1);
 }
 
-vfloat32m2_t test_vfmv_s_f_f32m2(float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m2(src, vl);
+vfloat32m2_t test_vfmv_s_f_f32m2(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m2(rs1, vl);
 }
 
-float32_t test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) {
-  return __riscv_vfmv_f_s_f32m4_f32(src);
+float32_t test_vfmv_f_s_f32m4_f32(vfloat32m4_t vs1) {
+  return __riscv_vfmv_f_s_f32m4_f32(vs1);
 }
 
-vfloat32m4_t test_vfmv_s_f_f32m4(float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m4(src, vl);
+vfloat32m4_t test_vfmv_s_f_f32m4(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m4(rs1, vl);
 }
 
-float32_t test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) {
-  return __riscv_vfmv_f_s_f32m8_f32(src);
+float32_t test_vfmv_f_s_f32m8_f32(vfloat32m8_t vs1) {
+  return __riscv_vfmv_f_s_f32m8_f32(vs1);
 }
 
-vfloat32m8_t test_vfmv_s_f_f32m8(float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m8(src, vl);
+vfloat32m8_t test_vfmv_s_f_f32m8(float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m8(rs1, vl);
 }
 
-float64_t test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) {
-  return __riscv_vfmv_f_s_f64m1_f64(src);
+float64_t test_vfmv_f_s_f64m1_f64(vfloat64m1_t vs1) {
+  return __riscv_vfmv_f_s_f64m1_f64(vs1);
 }
 
-vfloat64m1_t test_vfmv_s_f_f64m1(float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m1(src, vl);
+vfloat64m1_t test_vfmv_s_f_f64m1(float64_t rs1, size_t vl) {
  return __riscv_vfmv_s_f_f64m1(rs1, vl);
 }
 
-float64_t test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) {
-  return __riscv_vfmv_f_s_f64m2_f64(src);
+float64_t test_vfmv_f_s_f64m2_f64(vfloat64m2_t vs1) {
+  return __riscv_vfmv_f_s_f64m2_f64(vs1);
 }
 
-vfloat64m2_t test_vfmv_s_f_f64m2(float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m2(src, vl);
+vfloat64m2_t test_vfmv_s_f_f64m2(float64_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f64m2(rs1, vl);
 }
 
-float64_t test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) {
-  return __riscv_vfmv_f_s_f64m4_f64(src);
+float64_t test_vfmv_f_s_f64m4_f64(vfloat64m4_t vs1) {
+  return __riscv_vfmv_f_s_f64m4_f64(vs1);
 }
 
-vfloat64m4_t test_vfmv_s_f_f64m4(float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m4(src, vl);
+vfloat64m4_t test_vfmv_s_f_f64m4(float64_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f64m4(rs1, vl);
 }
 
-float64_t test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) {
-  return __riscv_vfmv_f_s_f64m8_f64(src);
+float64_t test_vfmv_f_s_f64m8_f64(vfloat64m8_t vs1) {
+  return __riscv_vfmv_f_s_f64m8_f64(vs1);
 }
 
-vfloat64m8_t test_vfmv_s_f_f64m8(float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m8(src, vl);
+vfloat64m8_t test_vfmv_s_f_f64m8(float64_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f64m8(rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vfmv\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 45 } } */
diff --git a/auto-generated/gnu-api-tests/vfncvt.c b/auto-generated/gnu-api-tests/vfncvt.c
index 6b8be9f81..6d2c05a19 100644
--- a/auto-generated/gnu-api-tests/vfncvt.c
+++ b/auto-generated/gnu-api-tests/vfncvt.c
@@ -6,916 +6,916 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf8(src, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf8(vs2, vl);
 }
 
-vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf4(src, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf4(vs2, vl);
 }
 
-vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf2(src, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf2(vs2, vl);
 }
 
-vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m1(src, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m1(vs2, vl);
 }
 
-vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m2(src, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m2(vs2, vl);
 }
 
-vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m4(src, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m4(vs2, vl);
 }
 
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf8(src, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf8(vs2, vl);
 }
 
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf4(src, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf4(vs2, vl);
 }
 
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf2(src, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf2(vs2, vl);
 }
 
-vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m1(src, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m1(vs2, vl);
 }
 
-vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m2(src, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m2(vs2, vl);
 }
 
-vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m4(src, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m4(vs2, vl);
 }
 
-vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf4(src, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf4(vs2, vl);
 }
 
-vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf2(src, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf2(vs2, vl);
 }
 
-vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m1(src, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m1(vs2, vl);
 }
 
-vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m2(src, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m2(vs2, vl);
 }
 
-vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m4(src, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m4(vs2, vl);
 }
 
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf4(src, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf4(vs2, vl);
 }
 
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf2(src, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf2(vs2, vl);
 }
 
-vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m1(src, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m1(vs2, vl);
 }
 
-vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m2(src, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m2(vs2, vl);
 }
 
-vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m4(src, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m4(vs2, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf4(src, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf4(vs2, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf2(src, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf2(vs2, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m1(src, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m1(vs2, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m2(src, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m2(vs2, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m4(src, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m4(vs2, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16mf4(src, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16mf4(vs2, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16mf2(src, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16mf2(vs2, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m1(src, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m1(vs2, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m2(src, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m2(vs2, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m4(src, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m4(vs2, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16mf4(src, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16mf4(vs2, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16mf2(src, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16mf2(vs2, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m1(src, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m1(vs2, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m2(src, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m2(vs2, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m4(src, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m4(vs2, vl);
 }
 
-vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32mf2(src, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32mf2(vs2, vl);
 }
 
-vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m1(src, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m1(vs2, vl);
 }
 
-vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m2(src, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m2(vs2, vl);
 }
 
-vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m4(src, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m4(vs2, vl);
 }
 
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32mf2(src, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32mf2(vs2, vl);
 }
 
-vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m1(src, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m1(vs2, vl);
 }
 
-vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m2(src, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m2(vs2, vl);
 }
 
-vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m4(src, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m4(vs2, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32mf2(src, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32mf2(vs2, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m1(src, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m1(vs2, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m2(src, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m2(vs2, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m4(src, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m4(vs2, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32mf2(src, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32mf2(vs2, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m1(src, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m1(vs2, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m2(src, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m2(vs2, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m4(src, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m4(vs2, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32mf2(src, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32mf2(vs2, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m1(src, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m1(vs2, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m2(src, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m2(vs2, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m4(src, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m4(vs2, vl);
 }
 
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf8_m(mask, src, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf8_m(vm, vs2, vl);
 }
 
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf4_m(mask, src, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf4_m(vm, vs2, vl);
 }
 
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf2_m(mask, src, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf2_m(vm, vs2, vl);
 }
 
-vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m1_m(mask, src, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m1_m(vm, vs2, vl);
 }
 
-vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m2_m(mask, src, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m2_m(vm, vs2, vl);
 }
 
-vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m4_m(mask, src, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m4_m(vm, vs2, vl);
 }
 
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf8_m(mask, src, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf8_m(vm, vs2, vl);
 }
 
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf4_m(mask, src, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf4_m(vm, vs2, vl);
 }
 
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf2_m(mask, src, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf2_m(vm, vs2, vl);
 }
 
-vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m1_m(mask, src, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m1_m(vm, vs2, vl);
 }
 
-vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m2_m(mask, src, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m2_m(vm, vs2, vl);
 }
 
-vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m4_m(mask, src, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m4_m(vm, vs2, vl);
 }
 
-vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf4_m(mask, src, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf4_m(vm, vs2, vl);
 }
 
-vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf2_m(mask, src, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf2_m(vm, vs2, vl);
 }
 
-vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m1_m(mask, src, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m1_m(vm, vs2, vl);
 }
 
-vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m2_m(mask, src, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m2_m(vm, vs2, vl);
 }
 
-vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m4_m(mask, src, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m4_m(vm, vs2, vl);
 }
 
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf4_m(mask, src, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf4_m(vm, vs2, vl);
 }
 
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf2_m(mask, src, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf2_m(vm, vs2, vl);
 }
 
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m1_m(mask, src, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m1_m(vm, vs2, vl);
 }
 
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m2_m(mask, src, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m2_m(vm, vs2, vl);
 }
 
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m4_m(mask, src, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m4_m(vm, vs2, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf4_m(mask, src, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf4_m(vm, vs2, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf2_m(mask, src, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf2_m(vm, vs2, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m1_m(mask, src, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m1_m(vm, vs2, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m2_m(mask, src, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m2_m(vm, vs2, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m4_m(mask, src, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m4_m(vm, vs2, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16mf4_m(mask, src, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16mf4_m(vm, vs2, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16mf2_m(mask, src, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16mf2_m(vm, vs2, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m1_m(mask, src, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m1_m(vm, vs2, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m2_m(mask, src, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m2_m(vm, vs2, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m4_m(mask, src, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m4_m(vm, vs2, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16mf4_m(mask, src, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16mf4_m(vm, vs2, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16mf2_m(mask, src, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16mf2_m(vm, vs2, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m1_m(mask, src, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m1_m(vm, vs2, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m2_m(mask, src, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m2_m(vm, vs2, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m4_m(mask, src, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m4_m(vm, vs2, vl);
 }
 
-vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32mf2_m(mask, src, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32mf2_m(vm, vs2, vl);
 }
 
-vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m1_m(mask, src, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m1_m(vm, vs2, vl);
 }
 
-vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m2_m(mask, src, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m2_m(vm, vs2, vl);
 }
 
-vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m4_m(mask, src, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m4_m(vm, vs2, vl);
 }
 
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32mf2_m(mask, src, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32mf2_m(vm, vs2, vl);
 }
 
-vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m1_m(mask, src, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m1_m(vm, vs2, vl);
 }
 
-vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m2_m(mask, src, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m2_m(vm, vs2, vl);
 }
 
-vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m4_m(mask, src, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m4_m(vm, vs2, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32mf2_m(mask, src, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32mf2_m(vm, vs2, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m1_m(mask, src, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m1_m(vm, vs2, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m2_m(mask, src, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m2_m(vm, vs2, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m4_m(mask, src, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m4_m(vm, vs2, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32mf2_m(mask, src, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32mf2_m(vm, vs2, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m1_m(mask, src, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m1_m(vm, vs2, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m2_m(mask, src, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m2_m(vm, vs2, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m4_m(mask, src, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m4_m(vm, vs2, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32mf2_m(mask, src, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32mf2_m(vm, vs2, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m1_m(mask, src, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m1_m(vm, vs2, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m2_m(mask, src, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m2_m(vm, vs2, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m4_m(mask, src, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m4_m(vm, vs2, vl);
 }
 
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf8_rm(src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf4_rm(src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf2_rm(src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8m1_t test_vfncvt_x_f_w_i8m1_rm(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m1_rm(src, __RISCV_FRM_RNE, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8m2_t test_vfncvt_x_f_w_i8m2_rm(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m2_rm(src, __RISCV_FRM_RNE, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8m4_t test_vfncvt_x_f_w_i8m4_rm(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m4_rm(src, __RISCV_FRM_RNE, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf8_rm(src, __RISCV_FRM_RNE, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf8_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf4_rm(src, __RISCV_FRM_RNE, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf2_rm(src, __RISCV_FRM_RNE, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m1_rm(src, __RISCV_FRM_RNE, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m2_rm(src, __RISCV_FRM_RNE, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m4_rm(src, __RISCV_FRM_RNE, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16m1_t test_vfncvt_x_f_w_i16m1_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m1_rm(src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16m2_t test_vfncvt_x_f_w_i16m2_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m2_rm(src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint16m4_t test_vfncvt_x_f_w_i16m4_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m4_rm(src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m1_rm(src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m2_rm(src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m4_rm(src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm(vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm(vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm(vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm(vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm(vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm(vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm(vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm(vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm(vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm(vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm(vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm(vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm(vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm(vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f16m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16mf4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f16m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32m1_t test_vfncvt_x_f_w_i32m1_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m1_rm(src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32m2_t test_vfncvt_x_f_w_i32m2_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m2_rm(src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint32m4_t test_vfncvt_x_f_w_i32m4_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i32m4_rm(src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m1_rm(src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m2_rm(src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u32m4_rm(src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm(vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm(vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm(vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm(vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm(vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm(vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm(vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f32m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm(vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm(vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm(vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm(vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm(vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_xu_w_f32m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_xu_w_f32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32mf2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32mf2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m1_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m1_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m2_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m2_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m4_rm(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m4_rm(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
__RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_rm_m(vm, vs2, 
__RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return 
__riscv_vfncvt_f_x_w_f16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_m(vbool4_t vm, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t 
test_vfncvt_f_f_w_f16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_m(vbool64_t vm, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_rm_m(mask, src, 
__RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_m(vbool32_t vm, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_m(vbool16_t vm, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_m(vbool8_t vm, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.[ivxfswum.]+\s+} 228 } } */ diff --git a/auto-generated/gnu-api-tests/vfncvt_rod.c b/auto-generated/gnu-api-tests/vfncvt_rod.c index c40771f11..1c623a8f5 100644 --- a/auto-generated/gnu-api-tests/vfncvt_rod.c +++ b/auto-generated/gnu-api-tests/vfncvt_rod.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf4(src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4(vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf2(src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2(vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m1(src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1(vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m2(src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2(vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m4(src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4(vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2(src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2(vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m1(src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1(vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m2(src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2(vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m4(src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4(vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf4_m(mask, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_m(vm, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf2_m(mask, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_m(vm, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return 
__riscv_vfncvt_rod_f_f_w_f16m1_m(vm, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_m(vm, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_m(vm, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2_m(mask, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rod[ivxfswum.]*\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vfncvt_rtz.c b/auto-generated/gnu-api-tests/vfncvt_rtz.c index 844be0b9f..03dc4ca43 100644 --- a/auto-generated/gnu-api-tests/vfncvt_rtz.c +++ b/auto-generated/gnu-api-tests/vfncvt_rtz.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf8(src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8(vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf4(src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4(vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf2(src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2(vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m1(src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1(vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m2(src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2(vs2, vl); } -vint8m4_t 
test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m4(src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4(vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf8(src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8(vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf4(src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4(vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf2(src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2(vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m1(src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1(vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m2(src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2(vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m4(src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4(vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf4(src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4(vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf2(src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2(vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m1(src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1(vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m2(src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2(vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m4(src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4(vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf4(src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4(vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf2(src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2(vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { - return 
__riscv_vfncvt_rtz_xu_f_w_u16m1(src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1(vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m2(src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2(vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m4(src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4(vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32mf2(src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2(vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m1(src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1(vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m2(src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2(vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m4(src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4(vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32mf2(src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2(vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m1(src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1(vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m2(src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2(vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m4(src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4(vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf8_m(mask, src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8_m(vm, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf4_m(mask, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4_m(vm, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf2_m(mask, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2_m(vm, vs2, vl); } -vint8m1_t 
test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m1_m(mask, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1_m(vm, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m2_m(mask, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2_m(vm, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m4_m(mask, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4_m(vm, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf8_m(mask, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf4_m(mask, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf2_m(mask, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m1_m(mask, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m2_m(mask, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m4_m(mask, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4_m(vm, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf4_m(mask, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4_m(vm, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf2_m(mask, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2_m(vm, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m1_m(mask, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1_m(vm, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t 
mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m2_m(mask, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2_m(vm, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m4_m(mask, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4_m(vm, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf4_m(mask, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf2_m(mask, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m1_m(mask, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m2_m(mask, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m4_m(mask, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4_m(vm, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32mf2_m(mask, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2_m(vm, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m1_m(mask, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1_m(vm, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m2_m(mask, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2_m(vm, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m4_m(mask, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4_m(vm, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32mf2_m(mask, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_m(vm, vs2, vl); } -vuint32m1_t 
test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m1_m(mask, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m2_m(mask, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m4_m(mask, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rtz[ivxfswum.]*\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfneg.c b/auto-generated/gnu-api-tests/vfneg.c index 1139d1bc5..c9d94a711 100644 --- a/auto-generated/gnu-api-tests/vfneg.c +++ b/auto-generated/gnu-api-tests/vfneg.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg_v_f16mf4(op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg_v_f16mf4(vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg_v_f16mf2(op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg_v_f16mf2(vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg_v_f16m1(op1, vl); +vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg_v_f16m1(vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg_v_f16m2(op1, vl); +vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg_v_f16m2(vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg_v_f16m4(op1, vl); +vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg_v_f16m4(vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg_v_f16m8(op1, vl); +vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t vs, size_t vl) { + return __riscv_vfneg_v_f16m8(vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg_v_f32mf2(op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg_v_f32mf2(vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg_v_f32m1(op1, vl); +vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg_v_f32m1(vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg_v_f32m2(op1, vl); +vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg_v_f32m2(vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg_v_f32m4(op1, vl); +vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t vs, size_t vl) { + return __riscv_vfneg_v_f32m4(vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t op1, size_t vl) { - return 
__riscv_vfneg_v_f32m8(op1, vl); +vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg_v_f32m8(vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg_v_f64m1(op1, vl); +vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg_v_f64m1(vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg_v_f64m2(op1, vl); +vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg_v_f64m2(vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg_v_f64m4(op1, vl); +vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg_v_f64m4(vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg_v_f64m8(op1, vl); +vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg_v_f64m8(vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg_v_f16mf4_m(mask, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg_v_f16mf4_m(vm, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg_v_f16mf2_m(mask, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg_v_f16mf2_m(vm, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg_v_f16m1_m(mask, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg_v_f16m1_m(vm, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg_v_f16m2_m(mask, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg_v_f16m2_m(vm, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg_v_f16m4_m(mask, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg_v_f16m4_m(vm, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs, size_t vl) { + return __riscv_vfneg_v_f16m8_m(vm, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg_v_f32mf2_m(mask, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg_v_f32mf2_m(vm, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg_v_f32m1_m(vm, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg_v_f32m2_m(vm, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs, size_t vl) { + return 
__riscv_vfneg_v_f32m4_m(vm, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg_v_f32m8_m(vm, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg_v_f64m1_m(vm, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg_v_f64m2_m(vm, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg_v_f64m4_m(vm, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg_v_f64m8_m(vm, vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfneg\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vfnmacc.c b/auto-generated/gnu-api-tests/vfnmacc.c index 786c1e911..4fd966523 100644 --- a/auto-generated/gnu-api-tests/vfnmacc.c +++ b/auto-generated/gnu-api-tests/vfnmacc.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t return __riscv_vfnmacc_vf_f64m8(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_m(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_m(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_m(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_m(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_m(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_m(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_m(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_m(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_m(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t vm, 
vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m1_m(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_m(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m1_m(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_m(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m2_m(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_m(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m8_m(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t 
test_vfnmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m1_m(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); 
+vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmacc_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_m(vbool32_t mask, 
vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m8_rm_m(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t 
vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t 
test_vfnmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmacc\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vfnmadd.c b/auto-generated/gnu-api-tests/vfnmadd.c
index 5b79f7c3c..18eb30748 100644
--- a/auto-generated/gnu-api-tests/vfnmadd.c
+++ b/auto-generated/gnu-api-tests/vfnmadd.c
@@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t
   return __riscv_vfnmadd_vf_f64m8(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf4_m(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m1_m(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m1_m(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m2_m(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m2_m(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+
return __riscv_vfnmadd_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m8_m(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, 
vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f64m1_m(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, 
float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmadd_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m1_rm_m(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, 
vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t 
test_vfnmadd_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmadd\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vfnmsac.c b/auto-generated/gnu-api-tests/vfnmsac.c
index da1705d16..ada796aa2 100644
--- a/auto-generated/gnu-api-tests/vfnmsac.c
+++ b/auto-generated/gnu-api-tests/vfnmsac.c
@@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t
   return __riscv_vfnmsac_vf_f64m8(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_vv_f16mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask,
vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_m(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_m(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_m(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_m(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_m(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_m(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_m(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_m(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_m(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_m(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_m(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_m(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_m(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_m(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t 
test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_m(vm, vd, 
vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_m(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmsac_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t 
test_vfnmsac_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t 
vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t 
test_vfnmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return 
__riscv_vfnmsac_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac_vf_f64m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsac\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vfnmsub.c b/auto-generated/gnu-api-tests/vfnmsub.c
index 62c21a7a0..87ca95d73 100644
--- a/auto-generated/gnu-api-tests/vfnmsub.c
+++ b/auto-generated/gnu-api-tests/vfnmsub.c
@@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t
   return __riscv_vfnmsub_vf_f64m8(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_vv_f16mf4_m(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_vv_f16mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_vf_f16mf4_m(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_vf_f16mf4_m(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_vv_f16mf2_m(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_vv_f16mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_vf_f16mf2_m(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_vf_f16mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd,
vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_m(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_m(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_m(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_m(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_m(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_m(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_m(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_m(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_m(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_m(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_m(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_m(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_m(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_m(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_m(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_m(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_m(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_m(vm, vd, rs1, vs2, vl); } -vfloat32m1_t 
test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_m(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_m(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_m(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_m(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_m(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_m(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_m(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_m(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_m(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_m(vm, vd, rs1, vs2, vl); } 
-vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m2_m(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_m(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_m(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_m(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_m(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_m(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmsub_vf_f64m8_rm(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t 
vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat16m8_t test_vfnmsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, 
size_t vl) { - return __riscv_vfnmsub_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m2_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t 
test_vfnmsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_rm_m(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_rm_m(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsub\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vfrdiv.c b/auto-generated/gnu-api-tests/vfrdiv.c index 76e00f2dd..9639a7a29 100644 --- a/auto-generated/gnu-api-tests/vfrdiv.c +++ b/auto-generated/gnu-api-tests/vfrdiv.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { 
- return __riscv_vfrdiv_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1_m(mask, op1, op2, vl); 
+vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m8_m(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } 
-vfloat16m4_t test_vfrdiv_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, 
vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, 
vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrdiv\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfrec7.c b/auto-generated/gnu-api-tests/vfrec7.c index 30a2b2aa2..95de87b2f 100644 --- a/auto-generated/gnu-api-tests/vfrec7.c +++ b/auto-generated/gnu-api-tests/vfrec7.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf4(op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf4(vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf2(op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf2(vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m1(op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m1(vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m2(op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m2(vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m4(op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m4(vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m8(op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m8(vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32mf2(op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t vs2, size_t vl) { + 
return __riscv_vfrec7_v_f32mf2(vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m1(op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m1(vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m2(op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m2(vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m4(op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m4(vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m8(op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m8(vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m1(op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m1(vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m2(op1, vl); +vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m2(vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m4(op1, vl); +vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m4(vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m8(op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m8(vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf4_m(mask, op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf4_m(vm, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf2_m(mask, op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf2_m(vm, vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m1_m(mask, op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m1_m(vm, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m2_m(mask, op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m2_m(vm, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m4_m(mask, op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m4_m(vm, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m8_m(vm, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32mf2_m(mask, op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t vm, 
vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m4_m(vm, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m8_m(vm, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m1_m(vm, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m2_m(vm, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m4_m(vm, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m8_m(vm, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf4_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_rm(vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m1_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrec7_v_f16m1_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_rm(vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrec7_v_f16m2_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_rm(vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrec7_v_f16m4_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t 
test_vfrec7_v_f16m8_rm(vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m8_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrec7_v_f16m8_rm(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32mf2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32mf2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_rm(vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrec7_v_f32m1_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_rm(vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrec7_v_f32m2_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_rm(vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrec7_v_f32m4_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_rm(vfloat32m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m8_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrec7_v_f32m8_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_rm(vfloat64m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m1_rm(op1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrec7_v_f64m1_rm(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_rm(vfloat64m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrec7_v_f64m2_rm(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_rm(vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrec7_v_f64m4_rm(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_rm(vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m8_rm(op1, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrec7_v_f64m8_rm(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf4_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrec7_v_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, 
vl); } -vfloat16m2_t test_vfrec7_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrec7_v_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrec7_v_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f16m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrec7_v_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f16m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrec7_v_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrec7_v_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrec7_v_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f32m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrec7_v_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrec7_v_f32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrec7_v_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrec7_v_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrec7_v_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_v_f64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_v_f64m8_rm_m(mask, op1, 
__RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrec7\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vfredmax.c b/auto-generated/gnu-api-tests/vfredmax.c
index 9e7c527f4..f39feb718 100644
--- a/auto-generated/gnu-api-tests/vfredmax.c
+++ b/auto-generated/gnu-api-tests/vfredmax.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16mf4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16mf4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16mf2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16mf2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m1_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m1_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m8_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m8_f16m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32mf2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32mf2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m1_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m1_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m4_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m8_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m8_f32m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m1_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m1_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m2_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m4_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m4_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m8_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m8_f64m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16mf4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16mf4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16mf2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16mf2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m1_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m8_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32mf2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32mf2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m1_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m4_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m8_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m1_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m2_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m4_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m8_f64m1_m(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmax\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vfredmin.c b/auto-generated/gnu-api-tests/vfredmin.c
index 50432f736..544c41559 100644
--- a/auto-generated/gnu-api-tests/vfredmin.c
+++ b/auto-generated/gnu-api-tests/vfredmin.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16mf4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16mf4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16mf2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16mf2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m1_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m1_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m8_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m8_f16m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32mf2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32mf2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m1_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m1_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m4_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m8_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m8_f32m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m1_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m1_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m2_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m4_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m4_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m8_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m8_f64m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16mf4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16mf4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16mf2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16mf2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m1_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f16m8_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32mf2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32mf2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m1_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m4_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f32m8_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m1_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m2_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m4_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_vs_f64m8_f64m1_m(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmin\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vfredosum.c b/auto-generated/gnu-api-tests/vfredosum.c
index d51daae5d..4a3b01ebd 100644
--- a/auto-generated/gnu-api-tests/vfredosum.c
+++ b/auto-generated/gnu-api-tests/vfredosum.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m1_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m1_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m8_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m8_f16m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32mf2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32mf2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m1_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m1_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m4_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m8_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m8_f32m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m1_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m1_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m2_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m4_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m4_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m8_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m8_f64m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m1_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m8_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32mf2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m1_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m4_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m8_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m1_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m2_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m4_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m8_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf4_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf4_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf2_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf2_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m1_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m1_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m2_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m2_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m4_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m4_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m8_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m8_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32mf2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m1_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m1_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m4_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m4_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m8_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m8_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m1_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m1_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m2_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m2_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m4_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m4_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m8_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m8_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf4_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf4_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16mf2_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16mf2_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m1_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m1_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m2_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m2_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m4_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m4_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f16m8_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f16m8_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32mf2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m1_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m1_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m4_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m4_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f32m8_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f32m8_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m1_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m1_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m2_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m2_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m4_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m4_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_vs_f64m8_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_vs_f64m8_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredosum\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vfredusum.c b/auto-generated/gnu-api-tests/vfredusum.c
index 77c598f7f..44095c198 100644
--- a/auto-generated/gnu-api-tests/vfredusum.c
+++ b/auto-generated/gnu-api-tests/vfredusum.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m1_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m1_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m2_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m2_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m4_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m4_f16m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m8_f16m1(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m8_f16m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32mf2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32mf2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m1_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m1_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m2_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m4_f32m1(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m8_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m8_f32m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m1_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m1_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m2_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m4_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m4_f64m1(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m8_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m8_f64m1(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m1_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m1_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m2_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m2_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m4_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m4_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m8_f16m1_m(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m8_f16m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32mf2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32mf2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m1_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m1_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m2_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m4_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m8_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m8_f32m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m1_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m1_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m2_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m4_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m4_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m8_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m8_f64m1_m(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf4_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf4_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf2_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf2_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m1_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m1_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m2_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m2_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m4_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f16m4_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m8_f16m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m8_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32mf2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m1_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m1_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m4_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m4_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m8_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f32m8_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m1_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m1_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m2_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m2_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m4_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m4_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m8_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f64m8_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf4_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf4_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16mf2_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16mf2_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m1_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m1_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m2_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f16m2_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m4_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m4_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f16m8_f16m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f16m8_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32mf2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f32mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m1_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f32m1_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f32m2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m4_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f32m4_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f32m8_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_vs_f32m8_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m1_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f64m1_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m2_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f64m2_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m4_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f64m4_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_vs_f64m8_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
  return __riscv_vfredusum_vs_f64m8_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredusum\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vfrsqrt7.c b/auto-generated/gnu-api-tests/vfrsqrt7.c
index 63b76de8c..a5768f682 100644
--- a/auto-generated/gnu-api-tests/vfrsqrt7.c
+++ b/auto-generated/gnu-api-tests/vfrsqrt7.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16mf4(op1, vl);
+vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16mf4(vs2, vl);
 }
 
-vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16mf2(op1, vl);
+vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16mf2(vs2, vl);
 }
 
-vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16m1(op1, vl);
+vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16m1(vs2, vl);
 }
 
-vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16m2(op1, vl);
+vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16m2(vs2, vl);
 }
 
-vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16m4(op1, vl);
+vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16m4(vs2, vl);
 }
 
-vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16m8(op1, vl);
+vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16m8(vs2, vl);
 }
 
-vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f32mf2(op1, vl);
+vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f32mf2(vs2, vl);
 }
 
-vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f32m1(op1, vl);
+vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f32m1(vs2, vl);
 }
 
-vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f32m2(op1, vl);
+vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f32m2(vs2, vl);
 }
 
-vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f32m4(op1, vl);
+vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f32m4(vs2, vl);
 }
 
-vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f32m8(op1, vl);
+vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f32m8(vs2, vl);
 }
 
-vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f64m1(op1, vl);
+vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f64m1(vs2, vl);
 }
 
-vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f64m2(op1, vl);
+vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f64m2(vs2, vl);
 }
 
-vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f64m4(op1, vl);
+vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f64m4(vs2, vl);
 }
 
-vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f64m8(op1, vl);
+vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f64m8(vs2, vl);
 }
 
-vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16mf4_m(mask, op1, vl);
+vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16mf4_m(vm, vs2, vl);
 }
 
-vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16mf2_m(mask, op1, vl);
+vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16mf2_m(vm, vs2, vl);
 }
 
-vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16m1_m(mask, op1, vl);
+vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16m1_m(vm, vs2, vl);
 }
 
-vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16m2_m(mask, op1, vl);
+vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16m2_m(vm, vs2, vl);
 }
 
-vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrsqrt7_v_f16m4_m(mask, op1, vl);
+vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrsqrt7_v_f16m4_m(vm, vs2, vl);
 }
 
-vfloat16m8_t
test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m8_m(vm, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32mf2_m(mask, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m4_m(vm, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m8_m(vm, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m1_m(vm, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m2_m(vm, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m4_m(vm, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m8_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsqrt7\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vfrsub.c b/auto-generated/gnu-api-tests/vfrsub.c index 4fdc2e7d2..e45450549 100644 --- a/auto-generated/gnu-api-tests/vfrsub.c +++ b/auto-generated/gnu-api-tests/vfrsub.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfrsub_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m8(vs2, 
rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1_m(mask, 
op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m8_m(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, 
vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m2_rm_m(vm, vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m4_rm_m(vm, vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsub\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfsgnj.c b/auto-generated/gnu-api-tests/vfsgnj.c index 484b9c145..2bbfc05b6 100644 --- a/auto-generated/gnu-api-tests/vfsgnj.c +++ b/auto-generated/gnu-api-tests/vfsgnj.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16mf4(vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16mf2(vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m1(vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m2(vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m4(vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t 
op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m8(vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m1(op1, op2, vl); +vfloat64m1_t 
test_vfsgnj_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfsgnj_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, 
float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t vm, 
vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnj\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfsgnjn.c b/auto-generated/gnu-api-tests/vfsgnjn.c index 98afe71f0..4d94bf345 100644 --- a/auto-generated/gnu-api-tests/vfsgnjn.c +++ b/auto-generated/gnu-api-tests/vfsgnjn.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf4(vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf2(vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m1(vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m2(vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m4(vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfsgnjn_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m8(vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t 
test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, 
size_t vl) { - return __riscv_vfsgnjn_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t 
test_vfsgnjn_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return 
__riscv_vfsgnjn_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjn\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfsgnjx.c b/auto-generated/gnu-api-tests/vfsgnjx.c index b9e98646b..65c10583a 100644 --- a/auto-generated/gnu-api-tests/vfsgnjx.c +++ b/auto-generated/gnu-api-tests/vfsgnjx.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf4(vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf2(vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m1(vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m2(vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return 
__riscv_vfsgnjx_vv_f16m4(vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m8(vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t 
test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return 
__riscv_vfsgnjx_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t 
test_vfsgnjx_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return 
__riscv_vfsgnjx_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjx\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vfslide1down.c b/auto-generated/gnu-api-tests/vfslide1down.c index 269adba5d..0790e0825 100644 --- a/auto-generated/gnu-api-tests/vfslide1down.c +++ b/auto-generated/gnu-api-tests/vfslide1down.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf4(src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf2(src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m1(src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m2(src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m4(src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m8(src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32mf2(src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t 
test_vfslide1down_vf_f32m1(vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m1(src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m2(src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m4(src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m8(src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m1(src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m2(src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m4(src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m8(src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf4_m(mask, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf2_m(mask, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m1_m(mask, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m2_m(mask, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t vm, vfloat16m2_t 
vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m4_m(mask, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m8_m(mask, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32mf2_m(mask, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m1_m(mask, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m2_m(mask, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m4_m(mask, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m8_m(mask, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m1_m(mask, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m2_m(mask, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m4_m(mask, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, float64_t value, size_t 
vl) { - return __riscv_vfslide1down_vf_f64m8_m(mask, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1down\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vfslide1up.c b/auto-generated/gnu-api-tests/vfslide1up.c index 7c0dbeb4a..1d69852ba 100644 --- a/auto-generated/gnu-api-tests/vfslide1up.c +++ b/auto-generated/gnu-api-tests/vfslide1up.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf4(src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf2(src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m1(src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m2(src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m4(src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m8(src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32mf2(src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m1(src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m2(src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m4(src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m4(vs2, rs1, vl); } 
-vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m8(src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m1(src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m2(src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m4(src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m8(src, value, vl); +vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf4_m(mask, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf2_m(mask, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m1_m(mask, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m2_m(mask, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m4_m(mask, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m8_m(mask, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32mf2_m(mask, src, value, vl); 
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m1_m(mask, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m2_m(mask, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m4_m(mask, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m8_m(mask, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m1_m(mask, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m2_m(mask, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m4_m(mask, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m8_m(mask, src, value, vl); +vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1up\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vfsqrt.c b/auto-generated/gnu-api-tests/vfsqrt.c index 926923a1b..9aac3c505 100644 --- a/auto-generated/gnu-api-tests/vfsqrt.c +++ b/auto-generated/gnu-api-tests/vfsqrt.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4(op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4(vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return 
__riscv_vfsqrt_v_f16mf2(op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2(vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1(op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1(vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2(op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2(vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4(op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4(vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8(op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8(vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2(op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2(vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1(op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1(vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2(op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2(vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4(op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4(vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8(op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8(vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1(op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1(vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2(op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2(vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4(op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4(vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8(op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m8(vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_m(mask, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_m(vm, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_m(mask, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_m(vm, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t 
mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_m(mask, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1_m(vm, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_m(mask, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_m(vm, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_m(mask, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_m(vm, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8_m(vm, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_m(mask, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4_m(vm, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8_m(vm, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1_m(vm, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2_m(vm, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4_m(vm, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m8_m(vm, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t 
test_vfsqrt_v_f16mf4_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm(vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm(vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm(vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm(vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_rm(op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_rm(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm(vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm(vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm(vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm(vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_rm(op1, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm(vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_rm(op1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_rm(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm(vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_rm(op1, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_rm(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm(vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_rm(op1, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_rm(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4_rm(vs2, __RISCV_FRM_RNE, vl); } 
-vfloat64m8_t test_vfsqrt_v_f64m8_rm(vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m8_rm(op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m8_rm(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16mf4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfsqrt_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfsqrt_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfsqrt_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfsqrt_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32mf2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfsqrt_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfsqrt_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfsqrt_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfsqrt_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfsqrt_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m1_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfsqrt_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m2_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfsqrt_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m4_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfsqrt_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m8_rm_m(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsqrt\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vfsub.c b/auto-generated/gnu-api-tests/vfsub.c
index 09a551d93..b0d4ed713 100644
--- a/auto-generated/gnu-api-tests/vfsub.c
+++ b/auto-generated/gnu-api-tests/vfsub.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4(vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4(vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2(vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2(vs2, rs1, vl);
 }

-vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1(vs2, vs1, vl);
 }

-vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1(vs2, rs1, vl);
 }

-vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2(vs2, vs1, vl);
 }

-vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2(vs2, rs1, vl);
 }

-vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4(vs2, vs1, vl);
 }

-vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4(vs2, rs1, vl);
 }

-vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8(vs2, vs1, vl);
 }

-vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8(vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2(vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2(vs2, rs1, vl);
 }

-vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1(vs2, rs1, vl);
 }

-vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2(vs2, vs1, vl);
 }

-vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2(vs2, rs1, vl);
 }

-vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4(vs2, vs1, vl);
 }

-vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4(vs2, rs1, vl);
 }

-vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8(vs2, vs1, vl);
 }

-vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8(vs2, rs1, vl);
 }

-vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1(vs2, rs1, vl);
 }

-vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2(vs2, vs1, vl);
 }

-vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2(vs2, rs1, vl);
 }

-vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4(vs2, vs1, vl);
 }

-vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4(vs2, rs1, vl);
 }

-vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8(vs2, vs1, vl);
 }

-vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8(vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_m(mask, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_m(vm, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_m(mask, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_m(vm, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_m(mask, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_m(vm, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_m(mask, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_m(vm, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_m(vm, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_m(vm, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_m(mask, op1, op2, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_m(vm, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_m(mask, op1, op2, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_m(vm, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_m(mask, op1, op2, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_m(vm, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_m(mask, op1, op2, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_m(vm, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_m(mask, op1, op2, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_m(vm, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_m(mask, op1, op2, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_m(vm, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_m(vm, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_m(vm, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_m(vm, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_m(vm, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_m(vm, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_m(vm, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_m(vm, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_m(vm, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_m(vm, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_m(vm, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_m(vm, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_m(vm, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_m(vm, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_m(vm, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_m(vm, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_m(vm, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfsub_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfsub_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfsub_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfsub_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfsub_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfsub_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfsub_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfsub_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfsub_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfsub_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfsub_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfsub_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfsub_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfsub_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfsub_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfsub_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfsub_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfsub_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfsub_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfsub_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfsub_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfsub_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfsub_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfsub_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfsub_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfsub_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfsub_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfsub_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfsub_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfsub_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfsub_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfsub_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfsub_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfsub_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfsub_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfsub_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfsub_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfsub_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfsub_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfsub_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfsub_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsub\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vfwadd.c b/auto-generated/gnu-api-tests/vfwadd.c
index cc443625c..30ab40f8b 100644
--- a/auto-generated/gnu-api-tests/vfwadd.c
+++ b/auto-generated/gnu-api-tests/vfwadd.c
@@ -6,580 +6,580 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2(vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2(vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2(vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2(vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1(vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1(op1, op2, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1(vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2(vs2, vs1, vl);
 }

-vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2(vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2(vs2, vs1, vl);
 }

-vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2(op1, op2, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2(vs2, rs1, vl);
 }

-vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4(vs2, vs1, vl);
 }

-vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4(vs2, rs1, vl);
 }

-vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4(vs2, vs1, vl);
 }

-vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4(op1, op2, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4(vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8(vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8(vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8(vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8(op1, op2, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m8(vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1(vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1(op1, op2, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1(vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2(vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2(vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2(vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2(op1, op2, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2(vs2, rs1, vl);
 }

-vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4(vs2, vs1, vl);
 }

-vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4(vs2, rs1, vl);
 }

-vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4(vs2, vs1, vl);
 }

-vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4(op1, op2, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4(vs2, rs1, vl);
 }

-vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8(vs2, vs1, vl);
 }

-vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8(vs2, rs1, vl);
 }

-vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8(vs2, vs1, vl);
 }

-vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8(op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m8(vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2_m(vm, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2_m(vm, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_m(vm, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_m(mask, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_m(vm, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_m(vm, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_m(mask, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_m(vm, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_m(vm, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_m(vm, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_m(vm, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_m(mask, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_m(vm, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_m(vm, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4_m(vm, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4_m(vm, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4_m(mask, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4_m(vm, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8_m(vm, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8_m(vm, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8_m(vm, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8_m(mask, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m8_m(vm, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1_m(vm, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1_m(mask, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1_m(vm, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2_m(vm, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2_m(vm, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2_m(vm, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2_m(mask, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2_m(vm, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4_m(vm, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4_m(vm, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4_m(vm, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4_m(mask, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4_m(vm, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8_m(vm, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8_m(vm, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8_m(vm, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m8_m(vm, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm(vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm(vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwadd_vv_f32m1_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwadd_vf_f32m1_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwadd_wv_f32m1_rm(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwadd_wf_f32m1_rm(vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm(vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwadd_vv_f32m2_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwadd_vf_f32m2_rm(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwadd_wv_f32m2_rm(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwadd_wf_f32m2_rm(vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm(vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwadd_vv_f32m4_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwadd_vf_f32m4_rm(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwadd_wv_f32m4_rm(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_rm(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwadd_wf_f32m4_rm(vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_rm(vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwadd_vv_f32m8_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwadd_vf_f32m8_rm(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwadd_wv_f32m8_rm(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_rm(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwadd_wf_f32m8_rm(vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_rm(vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwadd_vv_f64m1_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwadd_vf_f64m1_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwadd_wv_f64m1_rm(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_rm(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwadd_wf_f64m1_rm(vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_rm(vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwadd_vv_f64m2_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwadd_vf_f64m2_rm(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwadd_wv_f64m2_rm(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_rm(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwadd_wf_f64m2_rm(vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_rm(vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwadd_vv_f64m4_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwadd_vf_f64m4_rm(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwadd_wv_f64m4_rm(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_rm(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwadd_wf_f64m4_rm(vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_rm(vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwadd_vv_f64m8_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwadd_vf_f64m8_rm(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwadd_wv_f64m8_rm(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_rm(vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwadd_wf_f64m8_rm(vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_rm(vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfwadd_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfwadd_wf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, 
size_t vl) { - return __riscv_vfwadd_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwadd\.[ivxfswum.]+\s+} 144 } } */ diff --git a/auto-generated/gnu-api-tests/vfwcvt.c b/auto-generated/gnu-api-tests/vfwcvt.c index 70f8fefd8..2648a10b8 100644 --- a/auto-generated/gnu-api-tests/vfwcvt.c +++ b/auto-generated/gnu-api-tests/vfwcvt.c @@ -6,604 +6,604 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf4(src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf4(vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf2(src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf2(vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m1(src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m1(vs2, 
vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m2(src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m2(vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m4(src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m4(vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m8(src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m8(vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf4(src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf4(vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf2(src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf2(vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m1(src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m1(vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m2(src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m2(vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m4(src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m4(vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m8(src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m8(vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2(src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2(vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1(src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1(vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2(src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2(vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4(src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4(vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8(src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8(vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2(src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2(vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) 
{ - return __riscv_vfwcvt_xu_f_v_u32m1(src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1(vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2(src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2(vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4(src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4(vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8(src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8(vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32mf2(src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32mf2(vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m1(src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m1(vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m2(src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m2(vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m4(src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m4(vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m8(src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m8(vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32mf2(src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32mf2(vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m1(src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m1(vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m2(src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m2(vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m4(src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m4(vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m8(src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m8(vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32mf2(src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32mf2(vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m1(src, 
vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m1(vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m2(src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m2(vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m4(src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m4(vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m8(src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m8(vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1(src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1(vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2(src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2(vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4(src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4(vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8(src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8(vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1(src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1(vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2(src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2(vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4(src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4(vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8(src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8(vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m1(src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m1(vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m2(src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m2(vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m4(src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m4(vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m8(src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t vs2, size_t vl) { + return 
__riscv_vfwcvt_f_x_v_f64m8(vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m1(src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m1(vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m2(src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m2(vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m4(src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m4(vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m8(src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m8(vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m1(src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m1(vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m2(src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m2(vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m4(src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m4(vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m8(src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m8(vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf4_m(mask, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf4_m(vm, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf2_m(mask, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf2_m(vm, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m1_m(vm, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t vm, vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m2_m(vm, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t vm, vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m4_m(vm, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m8_m(mask, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t vm, vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m8_m(vm, 
vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf4_m(mask, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf4_m(vm, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf2_m(mask, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf2_m(vm, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m1_m(vm, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m2_m(vm, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m4_m(vm, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m8_m(mask, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m8_m(vm, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_m(mask, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_m(vm, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_m(mask, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_m(vm, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_m(mask, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_m(vm, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_m(mask, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_m(vm, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_m(mask, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_m(vm, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_m(mask, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_m(mask, src, vl); +vuint32m1_t 
test_vfwcvt_xu_f_v_u32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_m(mask, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_m(mask, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_m(mask, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_m(vm, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32mf2_m(mask, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m4_m(vm, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t vm, vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m8_m(vm, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32mf2_m(mask, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m4_m(vm, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, 
vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m8_m(vm, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32mf2_m(mask, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32mf2_m(vm, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m1_m(vm, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m2_m(vm, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m4_m(vm, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m8_m(vm, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_m(mask, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_m(vm, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_m(mask, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_m(vm, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_m(mask, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_m(vm, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_m(mask, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_m(vm, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_m(mask, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_m(mask, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_m(mask, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return 
__riscv_vfwcvt_xu_f_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_m(mask, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_m(vm, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m1_m(mask, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m1_m(vm, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m2_m(vm, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m4_m(vm, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m8_m(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m8_m(vm, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m1_m(mask, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m1_m(vm, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m2_m(vm, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m4_m(vm, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m8_m(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m8_m(vm, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m1_m(mask, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m1_m(vm, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m2_m(vm, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m4_m(vm, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m8_m(mask, src, vl); +vfloat64m8_t 
test_vfwcvt_f_f_v_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m8_m(vm, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_rm(src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_rm(vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_rm(src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_rm(src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_rm(src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_rm(src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_rm(src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_rm(src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_rm(src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_rm(src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_rm(src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_rm(src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_rm(src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm(vfloat32m2_t src, size_t vl) { - return 
__riscv_vfwcvt_x_f_v_i64m4_rm(src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_rm(src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_rm(src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_rm(src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_rm(src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4_rm(vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_rm(src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_rm(vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_rm_m(mask, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); } 
-vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m1_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m2_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m4_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m8_rm_m(mask, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.[ivxfswum.]+\s+} 150 } } */
diff --git a/auto-generated/gnu-api-tests/vfwcvt_rtz.c b/auto-generated/gnu-api-tests/vfwcvt_rtz.c
index f0190d097..91d04fc69 100644
--- a/auto-generated/gnu-api-tests/vfwcvt_rtz.c
+++ b/auto-generated/gnu-api-tests/vfwcvt_rtz.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32mf2(src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32mf2(vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m1(src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m1(vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m2(src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m2(vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m4(src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m4(vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m8(src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m8(vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2(src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2(vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m1(src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m1(vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m2(src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m2(vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m4(src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m4(vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m8(src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m8(vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m1(src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m1(vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m2(src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m2(vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m4(src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m4(vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m8(src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m8(vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m1(src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m1(vs2, vl);
 }
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m2(src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m2(vs2, vl);
 }
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m4(src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m4(vs2, vl);
 }
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m8(src, vl);
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m8(vs2, vl);
 }
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_m(mask, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_m(vm, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m1_m(mask, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m1_m(vm, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m2_m(mask, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m2_m(vm, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m4_m(mask, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m4_m(vm, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m8_m(mask, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m8_m(vm, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_m(vm, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_m(vm, vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m1_m(mask, src, vl); +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m1_m(vm, vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m2_m(mask, src, vl); +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m2_m(vm, vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m4_m(mask, src, vl); +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m4_m(vm, vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m8_m(mask, src, vl); +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m8_m(vm, vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_m(vm, vs2, vl); } /* { 
dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.rtz[ivxfswum.]*\s+} 36 } } */ diff --git a/auto-generated/gnu-api-tests/vfwmacc.c b/auto-generated/gnu-api-tests/vfwmacc.c index 97e520419..8d46323b2 100644 --- a/auto-generated/gnu-api-tests/vfwmacc.c +++ b/auto-generated/gnu-api-tests/vfwmacc.c @@ -78,76 +78,76 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t return __riscv_vfwmacc_vf_f64m8(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return 
__riscv_vfwmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m8_m(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ 
-222,76 +222,76 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m4 return __riscv_vfwmacc_vf_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return 
__riscv_vfwmacc_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_m(vbool8_t mask, 
vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmacc\.[ivxfswum.]+\s+} 72 } } */ diff --git a/auto-generated/gnu-api-tests/vfwmsac.c b/auto-generated/gnu-api-tests/vfwmsac.c index 075ce2088..b64514270 100644 --- a/auto-generated/gnu-api-tests/vfwmsac.c +++ b/auto-generated/gnu-api-tests/vfwmsac.c @@ -78,76 +78,76 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t return __riscv_vfwmsac_vf_f64m8(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32mf2_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m1_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m2_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m4_m(mask, vd, 
vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m4_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m1_m(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m1_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m2_m(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m2_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m4_m(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m4_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m8_m(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return 
__riscv_vfwmsac_vf_f64m8_m(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m8_m(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -222,76 +222,76 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m4 return __riscv_vfwmsac_vf_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m4_rm_m(mask, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, 
size_t vl) { - return __riscv_vfwmsac_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmsac\.[ivxfswum.]+\s+} 72 } } */ diff --git a/auto-generated/gnu-api-tests/vfwmul.c b/auto-generated/gnu-api-tests/vfwmul.c index e2d9d4c51..a5aef6245 100644 --- a/auto-generated/gnu-api-tests/vfwmul.c +++ b/auto-generated/gnu-api-tests/vfwmul.c @@ -6,292 +6,292 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m8(vs2, vs1, vl); } 
-vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, 
float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t 
mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t 
test_vfwmul_vv_f32m8_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_rm(vfloat16m4_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_rm(vfloat32m1_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vf_f64m4_rm(vfloat32m2_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vv_f64m8_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vf_f64m8_rm(vfloat32m4_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vv_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vv_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vf_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vv_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return __riscv_vfwmul_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vf_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
- return __riscv_vfwmul_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+ return __riscv_vfwmul_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmul\.[ivxfswum.]+\s+} 72 } } */
diff --git a/auto-generated/gnu-api-tests/vfwnmacc.c b/auto-generated/gnu-api-tests/vfwnmacc.c
index ce2d8d623..fde94e615 100644
--- a/auto-generated/gnu-api-tests/vfwnmacc.c
+++ b/auto-generated/gnu-api-tests/vfwnmacc.c
@@ -78,76 +78,76 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t
  return __riscv_vfwnmacc_vf_f64m8(vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32mf2_m(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32mf2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32mf2_m(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32mf2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m1_m(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m1_m(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m2_m(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m2_m(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m4_m(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m4_m(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m8_m(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m8_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m8_m(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m8_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m1_m(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m1_m(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m2_m(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m2_m(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m4_m(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m4_m(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m8_m(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m8_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m8_m(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m8_m(vm, vd, vs1, vs2, vl);
 }

 vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -222,76 +222,76 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m
  return __riscv_vfwnmacc_vf_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmacc_vf_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vf_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmacc\.[ivxfswum.]+\s+} 72 } } */
diff --git a/auto-generated/gnu-api-tests/vfwnmsac.c b/auto-generated/gnu-api-tests/vfwnmsac.c
index 6f8ef08e7..ed3f58737 100644
--- a/auto-generated/gnu-api-tests/vfwnmsac.c
+++ b/auto-generated/gnu-api-tests/vfwnmsac.c
@@ -78,76 +78,76 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t
  return __riscv_vfwnmsac_vf_f64m8(vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32mf2_m(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32mf2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32mf2_m(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32mf2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m1_m(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m1_m(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m2_m(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m2_m(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m4_m(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m4_m(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m8_m(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m8_m(vm, vd, vs1, vs2, vl);
 }

-vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m8_m(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m8_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m1_m(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m1_m(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m1_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m2_m(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m2_m(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m2_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m4_m(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m4_m(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m4_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m8_m(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m8_m(vm, vd, vs1, vs2, vl);
 }

-vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m8_m(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m8_m(vm, vd, vs1, vs2, vl);
 }

 vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -222,76 +222,76 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m
  return __riscv_vfwnmsac_vf_f64m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32mf2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f32m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m1_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m2_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m4_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vv_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwnmsac_vf_f64m8_rm_m(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_f64m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmsac\.[ivxfswum.]+\s+} 72 } } */
diff --git a/auto-generated/gnu-api-tests/vfwredosum.c b/auto-generated/gnu-api-tests/vfwredosum.c
index 86f8b0db2..df29de2c1 100644
--- a/auto-generated/gnu-api-tests/vfwredosum.c
+++ b/auto-generated/gnu-api-tests/vfwredosum.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf4_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf2_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m1_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m2_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m4_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m8_f32m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32mf2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32mf2_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m1_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m2_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m4_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m8_f64m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf4_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf2_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m1_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m2_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m4_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m8_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32mf2_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m1_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m2_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m4_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m8_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf4_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf4_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m1_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m1_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m4_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m4_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m8_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m8_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32mf2_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32mf2_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m1_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m1_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m2_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m2_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m4_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m4_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m8_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m8_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m1_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m1_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m4_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m4_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f16m8_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f16m8_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m1_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m1_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m2_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m2_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m4_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m4_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredosum_vs_f32m8_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredosum_vs_f32m8_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredosum\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/gnu-api-tests/vfwredusum.c b/auto-generated/gnu-api-tests/vfwredusum.c
index 9103cdbda..953b8e6fb 100644
--- a/auto-generated/gnu-api-tests/vfwredusum.c
+++ b/auto-generated/gnu-api-tests/vfwredusum.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16mf4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16mf4_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16mf2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16mf2_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m1_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m1_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m2_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m2_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m4_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m4_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m8_f32m1(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m8_f32m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32mf2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32mf2_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m1_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m1_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m2_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m2_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m4_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m4_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m8_f64m1(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m8_f64m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16mf4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16mf4_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16mf2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16mf2_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m1_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m1_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m2_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m2_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m4_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m4_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m8_f32m1_m(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m8_f32m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32mf2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32mf2_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m1_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m1_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m2_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m2_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m4_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m4_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m8_f64m1_m(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m8_f64m1_m(vm, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16mf4_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16mf4_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16mf2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m1_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m1_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m2_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m4_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m4_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f16m8_f32m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f16m8_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32mf2_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32mf2_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m1_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m1_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m2_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m2_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m4_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m4_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
- return __riscv_vfwredusum_vs_f32m8_f64m1_rm(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+ return __riscv_vfwredusum_vs_f32m8_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m1_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m1_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m2_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m4_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m4_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m8_f32m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m8_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m1_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m1_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m2_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t 
test_vfwredusum_vs_f32m2_f64m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m2_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m4_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m4_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m8_f64m1_rm_m(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m8_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredusum\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-api-tests/vfwsub.c b/auto-generated/gnu-api-tests/vfwsub.c index af898d36d..426b44bcf 100644 --- a/auto-generated/gnu-api-tests/vfwsub.c +++ b/auto-generated/gnu-api-tests/vfwsub.c @@ -6,580 +6,580 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2(vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2(vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1(vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1(vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t vs2, float16_t 
rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2(vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2(vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4(vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4(vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8(vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8(vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, 
float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1(vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1(op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1(vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1(op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2(vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2(op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2(vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2(op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4(vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4(vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8(vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8(op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t vs2, 
vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8(vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8(op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8(vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, 
vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t 
test_vfwsub_vf_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_m(vm, vs2, 
vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm(vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm(vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm(vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm(vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return 
__riscv_vfwsub_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm(vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm(vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm(vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm(vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm(vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm(vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfwsub_wf_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm(vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm(vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm(vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm(vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm(vfloat64m4_t op1, float32_t op2, size_t vl) { 
- return __riscv_vfwsub_wf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm(vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm(vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm(vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm(vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, 
size_t vl) { + return __riscv_vfwsub_vf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, 
float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm_m(vbool32_t vm, 
vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t 
test_vfwsub_wf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_f64m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwsub\.[ivxfswum.]+\s+} 144 } } */
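/* Illustrative driver, assumed rather than generated: it shows why the
   renamed vfwsub signatures above pair a wide vs2 with a narrow vs1.  The
   .vv form widens both f32 inputs to f64; the .wv form takes an already
   wide f64 vs2 and widens only vs1, so intermediates stay at f64.  The
   function name widening_sub is hypothetical. */
#include <riscv_vector.h>

void widening_sub(const float *a, const float *b, double *out, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m1(n);
    vfloat32m1_t va = __riscv_vle32_v_f32m1(a, vl);
    vfloat32m1_t vb = __riscv_vle32_v_f32m1(b, vl);
    vfloat64m2_t d = __riscv_vfwsub_vv_f64m2(va, vb, vl); /* (double)a - (double)b */
    d = __riscv_vfwsub_wv_f64m2(d, vb, vl);               /* d - (double)b, wide .wv form */
    __riscv_vse64_v_f64m2(out, d, vl);
    a += vl;
    b += vl;
    out += vl;
  }
}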
diff --git a/auto-generated/gnu-api-tests/vid.c b/auto-generated/gnu-api-tests/vid.c
index e77749046..224569d81 100644
--- a/auto-generated/gnu-api-tests/vid.c
+++ b/auto-generated/gnu-api-tests/vid.c
@@ -94,92 +94,92 @@ vuint64m8_t test_vid_v_u64m8(size_t vl) {
   return __riscv_vid_v_u64m8(vl);
 }

-vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, size_t vl) {
-  return __riscv_vid_v_u8mf8_m(mask, vl);
+vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t vm, size_t vl) {
+  return __riscv_vid_v_u8mf8_m(vm, vl);
 }

-vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, size_t vl) {
-  return __riscv_vid_v_u8mf4_m(mask, vl);
+vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t vm, size_t vl) {
+  return __riscv_vid_v_u8mf4_m(vm, vl);
 }

-vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, size_t vl) {
-  return __riscv_vid_v_u8mf2_m(mask, vl);
+vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t vm, size_t vl) {
+  return __riscv_vid_v_u8mf2_m(vm, vl);
 }

-vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, size_t vl) {
-  return __riscv_vid_v_u8m1_m(mask, vl);
+vuint8m1_t test_vid_v_u8m1_m(vbool8_t vm, size_t vl) {
+  return __riscv_vid_v_u8m1_m(vm, vl);
 }

-vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, size_t vl) {
-  return __riscv_vid_v_u8m2_m(mask, vl);
+vuint8m2_t test_vid_v_u8m2_m(vbool4_t vm, size_t vl) {
+  return __riscv_vid_v_u8m2_m(vm, vl);
 }

-vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, size_t vl) {
-  return __riscv_vid_v_u8m4_m(mask, vl);
+vuint8m4_t test_vid_v_u8m4_m(vbool2_t vm, size_t vl) {
+  return __riscv_vid_v_u8m4_m(vm, vl);
 }

-vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, size_t vl) {
-  return __riscv_vid_v_u8m8_m(mask, vl);
+vuint8m8_t test_vid_v_u8m8_m(vbool1_t vm, size_t vl) {
+  return __riscv_vid_v_u8m8_m(vm, vl);
 }

-vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, size_t vl) {
-  return __riscv_vid_v_u16mf4_m(mask, vl);
+vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t vm, size_t vl) {
+  return __riscv_vid_v_u16mf4_m(vm, vl);
 }

-vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, size_t vl) {
-  return __riscv_vid_v_u16mf2_m(mask, vl);
+vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t vm, size_t vl) {
+  return __riscv_vid_v_u16mf2_m(vm, vl);
 }

-vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, size_t vl) {
-  return __riscv_vid_v_u16m1_m(mask, vl);
+vuint16m1_t test_vid_v_u16m1_m(vbool16_t vm, size_t vl) {
+  return __riscv_vid_v_u16m1_m(vm, vl);
 }

-vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, size_t vl) {
-  return __riscv_vid_v_u16m2_m(mask, vl);
+vuint16m2_t test_vid_v_u16m2_m(vbool8_t vm, size_t vl) {
+  return __riscv_vid_v_u16m2_m(vm, vl);
 }

-vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, size_t vl) {
-  return __riscv_vid_v_u16m4_m(mask, vl);
+vuint16m4_t test_vid_v_u16m4_m(vbool4_t vm, size_t vl) {
+  return __riscv_vid_v_u16m4_m(vm, vl);
 }

-vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, size_t vl) {
-  return __riscv_vid_v_u16m8_m(mask, vl);
+vuint16m8_t test_vid_v_u16m8_m(vbool2_t vm, size_t vl) {
+  return __riscv_vid_v_u16m8_m(vm, vl);
 }

-vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, size_t vl) {
-  return __riscv_vid_v_u32mf2_m(mask, vl);
+vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t vm, size_t vl) {
+  return __riscv_vid_v_u32mf2_m(vm, vl);
 }

-vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, size_t vl) {
-  return __riscv_vid_v_u32m1_m(mask, vl);
+vuint32m1_t test_vid_v_u32m1_m(vbool32_t vm, size_t vl) {
+  return __riscv_vid_v_u32m1_m(vm, vl);
 }

-vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, size_t vl) {
-  return __riscv_vid_v_u32m2_m(mask, vl);
+vuint32m2_t test_vid_v_u32m2_m(vbool16_t vm, size_t vl) {
+  return __riscv_vid_v_u32m2_m(vm, vl);
 }

-vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, size_t vl) {
-  return __riscv_vid_v_u32m4_m(mask, vl);
+vuint32m4_t test_vid_v_u32m4_m(vbool8_t vm, size_t vl) {
+  return __riscv_vid_v_u32m4_m(vm, vl);
 }

-vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, size_t vl) {
-  return __riscv_vid_v_u32m8_m(mask, vl);
+vuint32m8_t test_vid_v_u32m8_m(vbool4_t vm, size_t vl) {
+  return __riscv_vid_v_u32m8_m(vm, vl);
 }

-vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, size_t vl) {
-  return __riscv_vid_v_u64m1_m(mask, vl);
+vuint64m1_t test_vid_v_u64m1_m(vbool64_t vm, size_t vl) {
+  return __riscv_vid_v_u64m1_m(vm, vl);
 }

-vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, size_t vl) {
-  return __riscv_vid_v_u64m2_m(mask, vl);
+vuint64m2_t test_vid_v_u64m2_m(vbool32_t vm, size_t vl) {
+  return __riscv_vid_v_u64m2_m(vm, vl);
 }

-vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, size_t vl) {
-  return __riscv_vid_v_u64m4_m(mask, vl);
+vuint64m4_t test_vid_v_u64m4_m(vbool16_t vm, size_t vl) {
+  return __riscv_vid_v_u64m4_m(vm, vl);
 }

-vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, size_t vl) {
-  return __riscv_vid_v_u64m8_m(mask, vl);
+vuint64m8_t test_vid_v_u64m8_m(vbool8_t vm, size_t vl) {
+  return __riscv_vid_v_u64m8_m(vm, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vid\.[ivxfswum.]+\s+} 44 } } */
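/* Hedged sketch, assumed rather than generated: vid.v fills the destination
   with lane indices 0, 1, 2, ...  Under the plain _m form exercised above,
   only the lanes selected by vm are written; the rest are unspecified.
   Note the vbool4_t mask type: for u8m2 operands SEW/LMUL = 8/2 = 4.
   The helper name even_lane_indices is hypothetical. */
#include <riscv_vector.h>

/* Lane indices of the even-valued elements of src; other lanes undefined. */
vuint8m2_t even_lane_indices(vuint8m2_t src, size_t vl) {
  vuint8m2_t lsb = __riscv_vand_vx_u8m2(src, 1, vl);
  vbool4_t vm = __riscv_vmseq_vx_u8m2_b4(lsb, 0, vl); /* vm: the mask operand */
  return __riscv_vid_v_u8m2_m(vm, vl);
}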
diff --git a/auto-generated/gnu-api-tests/viota.c b/auto-generated/gnu-api-tests/viota.c
index 0d9daf68a..667af6090 100644
--- a/auto-generated/gnu-api-tests/viota.c
+++ b/auto-generated/gnu-api-tests/viota.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
-  return __riscv_viota_m_u8mf8(op1, vl);
+vuint8mf8_t test_viota_m_u8mf8(vbool64_t vs2, size_t vl) {
+  return __riscv_viota_m_u8mf8(vs2, vl);
 }

-vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
-  return __riscv_viota_m_u8mf4(op1, vl);
+vuint8mf4_t test_viota_m_u8mf4(vbool32_t vs2, size_t vl) {
+  return __riscv_viota_m_u8mf4(vs2, vl);
 }

-vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
-  return __riscv_viota_m_u8mf2(op1, vl);
+vuint8mf2_t test_viota_m_u8mf2(vbool16_t vs2, size_t vl) {
+  return __riscv_viota_m_u8mf2(vs2, vl);
 }

-vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
-  return __riscv_viota_m_u8m1(op1, vl);
+vuint8m1_t test_viota_m_u8m1(vbool8_t vs2, size_t vl) {
+  return __riscv_viota_m_u8m1(vs2, vl);
 }

-vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
-  return __riscv_viota_m_u8m2(op1, vl);
+vuint8m2_t test_viota_m_u8m2(vbool4_t vs2, size_t vl) {
+  return __riscv_viota_m_u8m2(vs2, vl);
 }

-vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
-  return __riscv_viota_m_u8m4(op1, vl);
+vuint8m4_t test_viota_m_u8m4(vbool2_t vs2, size_t vl) {
+  return __riscv_viota_m_u8m4(vs2, vl);
 }

-vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
-  return __riscv_viota_m_u8m8(op1, vl);
+vuint8m8_t test_viota_m_u8m8(vbool1_t vs2, size_t vl) {
+  return __riscv_viota_m_u8m8(vs2, vl);
 }

-vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
-  return __riscv_viota_m_u16mf4(op1, vl);
+vuint16mf4_t test_viota_m_u16mf4(vbool64_t vs2, size_t vl) {
+  return __riscv_viota_m_u16mf4(vs2, vl);
 }

-vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
-  return __riscv_viota_m_u16mf2(op1, vl);
+vuint16mf2_t test_viota_m_u16mf2(vbool32_t vs2, size_t vl) {
+  return __riscv_viota_m_u16mf2(vs2, vl);
 }

-vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
-  return __riscv_viota_m_u16m1(op1, vl);
+vuint16m1_t test_viota_m_u16m1(vbool16_t vs2, size_t vl) {
+  return __riscv_viota_m_u16m1(vs2, vl);
 }

-vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
-  return __riscv_viota_m_u16m2(op1, vl);
+vuint16m2_t test_viota_m_u16m2(vbool8_t vs2, size_t vl) {
+  return __riscv_viota_m_u16m2(vs2, vl);
 }

-vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
-  return __riscv_viota_m_u16m4(op1, vl);
+vuint16m4_t test_viota_m_u16m4(vbool4_t vs2, size_t vl) {
+  return __riscv_viota_m_u16m4(vs2, vl);
 }

-vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
-  return __riscv_viota_m_u16m8(op1, vl);
+vuint16m8_t test_viota_m_u16m8(vbool2_t vs2, size_t vl) {
+  return __riscv_viota_m_u16m8(vs2, vl);
 }

-vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
-  return __riscv_viota_m_u32mf2(op1, vl);
+vuint32mf2_t test_viota_m_u32mf2(vbool64_t vs2, size_t vl) {
+  return __riscv_viota_m_u32mf2(vs2, vl);
 }

-vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
-  return __riscv_viota_m_u32m1(op1, vl);
+vuint32m1_t test_viota_m_u32m1(vbool32_t vs2, size_t vl) {
+  return __riscv_viota_m_u32m1(vs2, vl);
 }

-vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
-  return __riscv_viota_m_u32m2(op1, vl);
+vuint32m2_t test_viota_m_u32m2(vbool16_t vs2, size_t vl) {
+  return __riscv_viota_m_u32m2(vs2, vl);
 }

-vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
-  return __riscv_viota_m_u32m4(op1, vl);
+vuint32m4_t test_viota_m_u32m4(vbool8_t vs2, size_t vl) {
+  return __riscv_viota_m_u32m4(vs2, vl);
 }

-vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
-  return __riscv_viota_m_u32m8(op1, vl);
+vuint32m8_t test_viota_m_u32m8(vbool4_t vs2, size_t vl) {
+  return __riscv_viota_m_u32m8(vs2, vl);
 }

-vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
-  return __riscv_viota_m_u64m1(op1, vl);
+vuint64m1_t test_viota_m_u64m1(vbool64_t vs2, size_t vl) {
+  return __riscv_viota_m_u64m1(vs2, vl);
 }

-vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
-  return __riscv_viota_m_u64m2(op1, vl);
+vuint64m2_t test_viota_m_u64m2(vbool32_t vs2, size_t vl) {
+  return __riscv_viota_m_u64m2(vs2, vl);
 }

-vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
-  return __riscv_viota_m_u64m4(op1, vl);
+vuint64m4_t test_viota_m_u64m4(vbool16_t vs2, size_t vl) {
+  return __riscv_viota_m_u64m4(vs2, vl);
 }

-vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
-  return __riscv_viota_m_u64m8(op1, vl);
+vuint64m8_t test_viota_m_u64m8(vbool8_t vs2, size_t vl) {
+  return __riscv_viota_m_u64m8(vs2, vl);
 }

-vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vbool64_t op1, size_t vl) {
-  return __riscv_viota_m_u8mf8_m(mask, op1, vl);
+vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t vm, vbool64_t vs2, size_t vl) {
+  return __riscv_viota_m_u8mf8_m(vm, vs2, vl);
 }

-vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vbool32_t op1, size_t vl) {
-  return __riscv_viota_m_u8mf4_m(mask, op1, vl);
+vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t vm, vbool32_t vs2, size_t vl) {
+  return __riscv_viota_m_u8mf4_m(vm, vs2, vl);
 }

-vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vbool16_t op1, size_t vl) {
-  return __riscv_viota_m_u8mf2_m(mask, op1, vl);
+vuint8mf2_t
test_viota_m_u8mf2_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u8m1_m(mask, op1, vl); +vuint8m1_t test_viota_m_u8m1_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u8m2_m(mask, op1, vl); +vuint8m2_t test_viota_m_u8m2_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u8m4_m(mask, op1, vl); +vuint8m4_t test_viota_m_u8m4_m(vbool2_t vm, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u8m4_m(vm, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_viota_m_u8m8_m(mask, op1, vl); +vuint8m8_t test_viota_m_u8m8_m(vbool1_t vm, vbool1_t vs2, size_t vl) { + return __riscv_viota_m_u8m8_m(vm, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u16mf4_m(mask, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u16mf2_m(mask, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u16m1_m(mask, op1, vl); +vuint16m1_t test_viota_m_u16m1_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u16m2_m(mask, op1, vl); +vuint16m2_t test_viota_m_u16m2_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u16m4_m(mask, op1, vl); +vuint16m4_t test_viota_m_u16m4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u16m4_m(vm, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u16m8_m(mask, op1, vl); +vuint16m8_t test_viota_m_u16m8_m(vbool2_t vm, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u16m8_m(vm, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u32mf2_m(mask, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u32m1_m(mask, op1, vl); +vuint32m1_t test_viota_m_u32m1_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u32m2_m(mask, op1, vl); +vuint32m2_t test_viota_m_u32m2_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u32m4_m(mask, op1, vl); +vuint32m4_t test_viota_m_u32m4_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + 
return __riscv_viota_m_u32m4_m(vm, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u32m8_m(mask, op1, vl); +vuint32m8_t test_viota_m_u32m8_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u32m8_m(vm, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u64m1_m(mask, op1, vl); +vuint64m1_t test_viota_m_u64m1_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u64m1_m(vm, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u64m2_m(mask, op1, vl); +vuint64m2_t test_viota_m_u64m2_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u64m2_m(vm, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u64m4_m(mask, op1, vl); +vuint64m4_t test_viota_m_u64m4_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u64m4_m(vm, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u64m8_m(mask, op1, vl); +vuint64m8_t test_viota_m_u64m8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u64m8_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+viota\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-api-tests/vle16.c b/auto-generated/gnu-api-tests/vle16.c index aa36ace3f..e8af3662d 100644 --- a/auto-generated/gnu-api-tests/vle16.c +++ b/auto-generated/gnu-api-tests/vle16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vle16_v_f16mf4(const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16mf4(base, vl); +vfloat16mf4_t test_vle16_v_f16mf4(const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf4(rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2(const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16mf2(base, vl); +vfloat16mf2_t test_vle16_v_f16mf2(const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf2(rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1(const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m1(base, vl); +vfloat16m1_t test_vle16_v_f16m1(const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m1(rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2(const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m2(base, vl); +vfloat16m2_t test_vle16_v_f16m2(const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m2(rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4(const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m4(base, vl); +vfloat16m4_t test_vle16_v_f16m4(const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m4(rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8(const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m8(base, vl); +vfloat16m8_t test_vle16_v_f16m8(const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m8(rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16mf4(base, vl); +vint16mf4_t test_vle16_v_i16mf4(const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16mf4(rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16mf2(base, vl); +vint16mf2_t test_vle16_v_i16mf2(const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16mf2(rs1, vl); } -vint16m1_t 
test_vle16_v_i16m1(const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m1(base, vl); +vint16m1_t test_vle16_v_i16m1(const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m1(rs1, vl); } -vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m2(base, vl); +vint16m2_t test_vle16_v_i16m2(const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m2(rs1, vl); } -vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m4(base, vl); +vint16m4_t test_vle16_v_i16m4(const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m4(rs1, vl); } -vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m8(base, vl); +vint16m8_t test_vle16_v_i16m8(const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m8(rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16mf4(base, vl); +vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16mf4(rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16mf2(base, vl); +vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16mf2(rs1, vl); } -vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m1(base, vl); +vuint16m1_t test_vle16_v_u16m1(const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m1(rs1, vl); } -vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m2(base, vl); +vuint16m2_t test_vle16_v_u16m2(const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m2(rs1, vl); } -vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m4(base, vl); +vuint16m4_t test_vle16_v_u16m4(const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m4(rs1, vl); } -vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m8(base, vl); +vuint16m8_t test_vle16_v_u16m8(const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m8(rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16mf4_m(mask, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf4_m(vm, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16mf2_m(mask, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf2_m(vm, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m1_m(mask, base, vl); +vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m1_m(vm, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m2_m(mask, base, vl); +vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m2_m(vm, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m4_m(mask, base, vl); +vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m4_m(vm, rs1, vl); } -vfloat16m8_t 
test_vle16_v_f16m8_m(vbool2_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m8_m(mask, base, vl); +vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m8_m(vm, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16mf4_m(mask, base, vl); +vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16mf4_m(vm, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16mf2_m(mask, base, vl); +vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16mf2_m(vm, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m1_m(mask, base, vl); +vint16m1_t test_vle16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m1_m(vm, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m2_m(mask, base, vl); +vint16m2_t test_vle16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m2_m(vm, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m4_m(mask, base, vl); +vint16m4_t test_vle16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m4_m(vm, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m8_m(mask, base, vl); +vint16m8_t test_vle16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m8_m(vm, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16mf4_m(mask, base, vl); +vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16mf4_m(vm, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16mf2_m(mask, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16mf2_m(vm, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m1_m(mask, base, vl); +vuint16m1_t test_vle16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m1_m(vm, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m2_m(mask, base, vl); +vuint16m2_t test_vle16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m2_m(vm, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m4_m(mask, base, vl); +vuint16m4_t test_vle16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m4_m(vm, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m8_m(mask, base, vl); +vuint16m8_t test_vle16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/gnu-api-tests/vle16ff.c b/auto-generated/gnu-api-tests/vle16ff.c index 140d800da..720e1effd 100644 --- a/auto-generated/gnu-api-tests/vle16ff.c +++ b/auto-generated/gnu-api-tests/vle16ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vle16ff_v_f16mf4(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16mf4(base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16mf4(rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16mf2(base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16mf2(rs1, new_vl, vl); } -vfloat16m1_t test_vle16ff_v_f16m1(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m1(base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m1(rs1, new_vl, vl); } -vfloat16m2_t test_vle16ff_v_f16m2(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m2(base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m2(rs1, new_vl, vl); } -vfloat16m4_t test_vle16ff_v_f16m4(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m4(base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m4(rs1, new_vl, vl); } -vfloat16m8_t test_vle16ff_v_f16m8(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m8(base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m8(rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16mf4(base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16mf4(rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16mf2(base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16mf2(rs1, new_vl, vl); } -vint16m1_t test_vle16ff_v_i16m1(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16m1(base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m1(rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16m2(base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m2(rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16m4(base, new_vl, vl); +vint16m4_t test_vle16ff_v_i16m4(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m4(rs1, new_vl, vl); } -vint16m8_t test_vle16ff_v_i16m8(const int16_t *base, size_t *new_vl, size_t vl) { 
- return __riscv_vle16ff_v_i16m8(base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m8(rs1, new_vl, vl); } -vuint16mf4_t test_vle16ff_v_u16mf4(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16mf4(base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16mf4(rs1, new_vl, vl); } -vuint16mf2_t test_vle16ff_v_u16mf2(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16mf2(base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16mf2(rs1, new_vl, vl); } -vuint16m1_t test_vle16ff_v_u16m1(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m1(base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m1(rs1, new_vl, vl); } -vuint16m2_t test_vle16ff_v_u16m2(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m2(base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m2(rs1, new_vl, vl); } -vuint16m4_t test_vle16ff_v_u16m4(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m4(base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m4(rs1, new_vl, vl); } -vuint16m8_t test_vle16ff_v_u16m8(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m8(base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m8(rs1, new_vl, vl); } -vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16mf4_m(mask, base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16mf4_m(vm, rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16mf2_m(mask, base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16mf2_m(vm, rs1, new_vl, vl); } -vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m1_m(mask, base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m1_m(vm, rs1, new_vl, vl); } -vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m2_m(mask, base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m2_m(vm, rs1, new_vl, vl); } -vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m4_m(mask, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m4_m(vm, rs1, new_vl, vl); } -vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const float16_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_f16m8_m(mask, base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_f16m8_m(vm, rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16mf4_m(mask, base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16mf4_m(vm, rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16mf2_m(mask, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16mf2_m(vm, rs1, new_vl, vl); } -vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16m1_m(mask, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m1_m(vm, rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16m2_m(mask, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m2_m(vm, rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16m4_m(mask, base, new_vl, vl); +vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m4_m(vm, rs1, new_vl, vl); } -vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_i16m8_m(mask, base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_i16m8_m(vm, rs1, new_vl, vl); } -vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16mf4_m(mask, base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16mf4_m(vm, rs1, new_vl, vl); } -vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16mf2_m(mask, base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16mf2_m(vm, rs1, new_vl, vl); } -vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m1_m(mask, base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m1_m(vm, rs1, new_vl, vl); } -vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m2_m(mask, base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m2_m(vm, rs1, new_vl, vl); } -vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t 
*new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m4_m(mask, base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m4_m(vm, rs1, new_vl, vl); } -vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_v_u16m8_m(mask, base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_u16m8_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/gnu-api-tests/vle32.c b/auto-generated/gnu-api-tests/vle32.c index cfb997d88..27b48a59f 100644 --- a/auto-generated/gnu-api-tests/vle32.c +++ b/auto-generated/gnu-api-tests/vle32.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vle32_v_f32mf2(const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32mf2(base, vl); +vfloat32mf2_t test_vle32_v_f32mf2(const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32mf2(rs1, vl); } -vfloat32m1_t test_vle32_v_f32m1(const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m1(base, vl); +vfloat32m1_t test_vle32_v_f32m1(const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m1(rs1, vl); } -vfloat32m2_t test_vle32_v_f32m2(const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m2(base, vl); +vfloat32m2_t test_vle32_v_f32m2(const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m2(rs1, vl); } -vfloat32m4_t test_vle32_v_f32m4(const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m4(base, vl); +vfloat32m4_t test_vle32_v_f32m4(const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m4(rs1, vl); } -vfloat32m8_t test_vle32_v_f32m8(const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m8(base, vl); +vfloat32m8_t test_vle32_v_f32m8(const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m8(rs1, vl); } -vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32mf2(base, vl); +vint32mf2_t test_vle32_v_i32mf2(const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32mf2(rs1, vl); } -vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m1(base, vl); +vint32m1_t test_vle32_v_i32m1(const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m1(rs1, vl); } -vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m2(base, vl); +vint32m2_t test_vle32_v_i32m2(const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m2(rs1, vl); } -vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m4(base, vl); +vint32m4_t test_vle32_v_i32m4(const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m4(rs1, vl); } -vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m8(base, vl); +vint32m8_t test_vle32_v_i32m8(const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m8(rs1, vl); } -vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32mf2(base, vl); +vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32mf2(rs1, vl); } -vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m1(base, vl); 
+vuint32m1_t test_vle32_v_u32m1(const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m1(rs1, vl); } -vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m2(base, vl); +vuint32m2_t test_vle32_v_u32m2(const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m2(rs1, vl); } -vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m4(base, vl); +vuint32m4_t test_vle32_v_u32m4(const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m4(rs1, vl); } -vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m8(base, vl); +vuint32m8_t test_vle32_v_u32m8(const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m8(rs1, vl); } -vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32mf2_m(mask, base, vl); +vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32mf2_m(vm, rs1, vl); } -vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m1_m(mask, base, vl); +vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m1_m(vm, rs1, vl); } -vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m2_m(mask, base, vl); +vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m2_m(vm, rs1, vl); } -vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m4_m(mask, base, vl); +vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m4_m(vm, rs1, vl); } -vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32_v_f32m8_m(mask, base, vl); +vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32_v_f32m8_m(vm, rs1, vl); } -vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32mf2_m(mask, base, vl); +vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32mf2_m(vm, rs1, vl); } -vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m1_m(mask, base, vl); +vint32m1_t test_vle32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m1_m(vm, rs1, vl); } -vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m2_m(mask, base, vl); +vint32m2_t test_vle32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m2_m(vm, rs1, vl); } -vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m4_m(mask, base, vl); +vint32m4_t test_vle32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m4_m(vm, rs1, vl); } -vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32_v_i32m8_m(mask, base, vl); +vint32m8_t test_vle32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32_v_i32m8_m(vm, rs1, vl); } -vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32mf2_m(mask, 
base, vl); +vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32mf2_m(vm, rs1, vl); } -vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m1_m(mask, base, vl); +vuint32m1_t test_vle32_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m1_m(vm, rs1, vl); } -vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m2_m(mask, base, vl); +vuint32m2_t test_vle32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m2_m(vm, rs1, vl); } -vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m4_m(mask, base, vl); +vuint32m4_t test_vle32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m4_m(vm, rs1, vl); } -vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32_v_u32m8_m(mask, base, vl); +vuint32m8_t test_vle32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_v_u32m8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vle32ff.c b/auto-generated/gnu-api-tests/vle32ff.c index f33744a1d..e7c30567f 100644 --- a/auto-generated/gnu-api-tests/vle32ff.c +++ b/auto-generated/gnu-api-tests/vle32ff.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vle32ff_v_f32mf2(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32mf2(base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32mf2(rs1, new_vl, vl); } -vfloat32m1_t test_vle32ff_v_f32m1(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m1(base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m1(rs1, new_vl, vl); } -vfloat32m2_t test_vle32ff_v_f32m2(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m2(base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m2(rs1, new_vl, vl); } -vfloat32m4_t test_vle32ff_v_f32m4(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m4(base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m4(rs1, new_vl, vl); } -vfloat32m8_t test_vle32ff_v_f32m8(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m8(base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m8(rs1, new_vl, vl); } -vint32mf2_t test_vle32ff_v_i32mf2(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32mf2(base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32mf2(rs1, new_vl, vl); } -vint32m1_t test_vle32ff_v_i32m1(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m1(base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1(const int32_t *rs1, size_t *new_vl, size_t 
vl) { + return __riscv_vle32ff_v_i32m1(rs1, new_vl, vl); } -vint32m2_t test_vle32ff_v_i32m2(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m2(base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m2(rs1, new_vl, vl); } -vint32m4_t test_vle32ff_v_i32m4(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m4(base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m4(rs1, new_vl, vl); } -vint32m8_t test_vle32ff_v_i32m8(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m8(base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m8(rs1, new_vl, vl); } -vuint32mf2_t test_vle32ff_v_u32mf2(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32mf2(base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32mf2(rs1, new_vl, vl); } -vuint32m1_t test_vle32ff_v_u32m1(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m1(base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m1(rs1, new_vl, vl); } -vuint32m2_t test_vle32ff_v_u32m2(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m2(base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m2(rs1, new_vl, vl); } -vuint32m4_t test_vle32ff_v_u32m4(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m4(base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m4(rs1, new_vl, vl); } -vuint32m8_t test_vle32ff_v_u32m8(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m8(base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m8(rs1, new_vl, vl); } -vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32mf2_m(mask, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32mf2_m(vm, rs1, new_vl, vl); } -vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m1_m(mask, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m1_m(vm, rs1, new_vl, vl); } -vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m2_m(mask, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m2_m(vm, rs1, new_vl, vl); } -vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m4_m(mask, base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m4_m(vm, rs1, new_vl, vl); } 
-vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m8_m(mask, base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m8_m(vm, rs1, new_vl, vl); } -vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32mf2_m(mask, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32mf2_m(vm, rs1, new_vl, vl); } -vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m1_m(mask, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m1_m(vm, rs1, new_vl, vl); } -vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m2_m(mask, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m2_m(vm, rs1, new_vl, vl); } -vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m4_m(mask, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m4_m(vm, rs1, new_vl, vl); } -vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m8_m(mask, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m8_m(vm, rs1, new_vl, vl); } -vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32mf2_m(mask, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32mf2_m(vm, rs1, new_vl, vl); } -vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m1_m(mask, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m1_m(vm, rs1, new_vl, vl); } -vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m2_m(mask, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m2_m(vm, rs1, new_vl, vl); } -vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m4_m(mask, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m4_m(vm, rs1, new_vl, vl); } -vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m8_m(mask, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m8_m(vm, rs1, new_vl, vl); } /* { dg-final { 
scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32ff\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vle64.c b/auto-generated/gnu-api-tests/vle64.c index 684d1d44f..427256d2f 100644 --- a/auto-generated/gnu-api-tests/vle64.c +++ b/auto-generated/gnu-api-tests/vle64.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1_t test_vle64_v_f64m1(const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m1(base, vl); +vfloat64m1_t test_vle64_v_f64m1(const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m1(rs1, vl); } -vfloat64m2_t test_vle64_v_f64m2(const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m2(base, vl); +vfloat64m2_t test_vle64_v_f64m2(const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m2(rs1, vl); } -vfloat64m4_t test_vle64_v_f64m4(const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m4(base, vl); +vfloat64m4_t test_vle64_v_f64m4(const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m4(rs1, vl); } -vfloat64m8_t test_vle64_v_f64m8(const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m8(base, vl); +vfloat64m8_t test_vle64_v_f64m8(const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m8(rs1, vl); } -vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m1(base, vl); +vint64m1_t test_vle64_v_i64m1(const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m1(rs1, vl); } -vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m2(base, vl); +vint64m2_t test_vle64_v_i64m2(const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m2(rs1, vl); } -vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m4(base, vl); +vint64m4_t test_vle64_v_i64m4(const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m4(rs1, vl); } -vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m8(base, vl); +vint64m8_t test_vle64_v_i64m8(const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m8(rs1, vl); } -vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m1(base, vl); +vuint64m1_t test_vle64_v_u64m1(const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m1(rs1, vl); } -vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m2(base, vl); +vuint64m2_t test_vle64_v_u64m2(const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m2(rs1, vl); } -vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m4(base, vl); +vuint64m4_t test_vle64_v_u64m4(const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m4(rs1, vl); } -vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m8(base, vl); +vuint64m8_t test_vle64_v_u64m8(const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m8(rs1, vl); } -vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m1_m(mask, base, vl); +vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m1_m(vm, rs1, vl); } -vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m2_m(mask, base, vl); +vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, size_t vl) { + 
return __riscv_vle64_v_f64m2_m(vm, rs1, vl); } -vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m4_m(mask, base, vl); +vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m4_m(vm, rs1, vl); } -vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m8_m(mask, base, vl); +vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m8_m(vm, rs1, vl); } -vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m1_m(mask, base, vl); +vint64m1_t test_vle64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m1_m(vm, rs1, vl); } -vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m2_m(mask, base, vl); +vint64m2_t test_vle64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m2_m(vm, rs1, vl); } -vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m4_m(mask, base, vl); +vint64m4_t test_vle64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m4_m(vm, rs1, vl); } -vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m8_m(mask, base, vl); +vint64m8_t test_vle64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m8_m(vm, rs1, vl); } -vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m1_m(mask, base, vl); +vuint64m1_t test_vle64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m1_m(vm, rs1, vl); } -vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m2_m(mask, base, vl); +vuint64m2_t test_vle64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m2_m(vm, rs1, vl); } -vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m4_m(mask, base, vl); +vuint64m4_t test_vle64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m4_m(vm, rs1, vl); } -vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m8_m(mask, base, vl); +vuint64m8_t test_vle64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vle64ff.c b/auto-generated/gnu-api-tests/vle64ff.c index dc3885d4e..44a3b2a96 100644 --- a/auto-generated/gnu-api-tests/vle64ff.c +++ b/auto-generated/gnu-api-tests/vle64ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1_t test_vle64ff_v_f64m1(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m1(base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m1(rs1, new_vl, vl); } -vfloat64m2_t test_vle64ff_v_f64m2(const float64_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vle64ff_v_f64m2(base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m2(rs1, new_vl, vl); } -vfloat64m4_t test_vle64ff_v_f64m4(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m4(base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m4(rs1, new_vl, vl); } -vfloat64m8_t test_vle64ff_v_f64m8(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m8(base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m8(rs1, new_vl, vl); } -vint64m1_t test_vle64ff_v_i64m1(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m1(base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m1(rs1, new_vl, vl); } -vint64m2_t test_vle64ff_v_i64m2(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m2(base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m2(rs1, new_vl, vl); } -vint64m4_t test_vle64ff_v_i64m4(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m4(base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m4(rs1, new_vl, vl); } -vint64m8_t test_vle64ff_v_i64m8(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m8(base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m8(rs1, new_vl, vl); } -vuint64m1_t test_vle64ff_v_u64m1(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m1(base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m1(rs1, new_vl, vl); } -vuint64m2_t test_vle64ff_v_u64m2(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m2(base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m2(rs1, new_vl, vl); } -vuint64m4_t test_vle64ff_v_u64m4(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m4(base, new_vl, vl); +vuint64m4_t test_vle64ff_v_u64m4(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m4(rs1, new_vl, vl); } -vuint64m8_t test_vle64ff_v_u64m8(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m8(base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m8(rs1, new_vl, vl); } -vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m1_m(mask, base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m1_m(vm, rs1, new_vl, vl); } -vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m2_m(mask, base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m2_m(vm, rs1, 
new_vl, vl); } -vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m4_m(mask, base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m4_m(vm, rs1, new_vl, vl); } -vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m8_m(mask, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m8_m(vm, rs1, new_vl, vl); } -vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m1_m(mask, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m1_m(vm, rs1, new_vl, vl); } -vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m2_m(mask, base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m2_m(vm, rs1, new_vl, vl); } -vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m4_m(mask, base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m4_m(vm, rs1, new_vl, vl); } -vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m8_m(mask, base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m8_m(vm, rs1, new_vl, vl); } -vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m1_m(mask, base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m1_m(vm, rs1, new_vl, vl); } -vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m2_m(mask, base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m2_m(vm, rs1, new_vl, vl); } -vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m4_m(mask, base, new_vl, vl); +vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m4_m(vm, rs1, new_vl, vl); } -vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m8_m(mask, base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m8_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vle8.c b/auto-generated/gnu-api-tests/vle8.c index cf6cc13ec..4819e8d0f 100644 --- a/auto-generated/gnu-api-tests/vle8.c +++ 
b/auto-generated/gnu-api-tests/vle8.c @@ -6,116 +6,116 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf8(base, vl); +vint8mf8_t test_vle8_v_i8mf8(const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf8(rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf4(base, vl); +vint8mf4_t test_vle8_v_i8mf4(const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf4(rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf2(base, vl); +vint8mf2_t test_vle8_v_i8mf2(const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf2(rs1, vl); } -vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m1(base, vl); +vint8m1_t test_vle8_v_i8m1(const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m1(rs1, vl); } -vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m2(base, vl); +vint8m2_t test_vle8_v_i8m2(const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m2(rs1, vl); } -vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m4(base, vl); +vint8m4_t test_vle8_v_i8m4(const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m4(rs1, vl); } -vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m8(base, vl); +vint8m8_t test_vle8_v_i8m8(const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m8(rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf8(base, vl); +vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf8(rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf4(base, vl); +vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf4(rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf2(base, vl); +vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf2(rs1, vl); } -vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m1(base, vl); +vuint8m1_t test_vle8_v_u8m1(const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m1(rs1, vl); } -vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m2(base, vl); +vuint8m2_t test_vle8_v_u8m2(const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m2(rs1, vl); } -vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m4(base, vl); +vuint8m4_t test_vle8_v_u8m4(const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m4(rs1, vl); } -vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m8(base, vl); +vuint8m8_t test_vle8_v_u8m8(const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m8(rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf8_m(mask, base, vl); +vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf8_m(vm, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf4_m(mask, base, vl); +vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf4_m(vm, rs1, vl); } -vint8mf2_t 
test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf2_m(mask, base, vl); +vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf2_m(vm, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m1_m(mask, base, vl); +vint8m1_t test_vle8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m1_m(vm, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m2_m(mask, base, vl); +vint8m2_t test_vle8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m2_m(vm, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m4_m(mask, base, vl); +vint8m4_t test_vle8_v_i8m4_m(vbool2_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m4_m(vm, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m8_m(mask, base, vl); +vint8m8_t test_vle8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m8_m(vm, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf8_m(mask, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf8_m(vm, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf4_m(mask, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf4_m(vm, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf2_m(mask, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf2_m(vm, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m1_m(mask, base, vl); +vuint8m1_t test_vle8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m1_m(vm, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m2_m(mask, base, vl); +vuint8m2_t test_vle8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m2_m(vm, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m4_m(mask, base, vl); +vuint8m4_t test_vle8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m4_m(vm, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m8_m(mask, base, vl); +vuint8m8_t test_vle8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8\.[ivxfswum.]+\s+} 28 } } */ diff --git a/auto-generated/gnu-api-tests/vle8ff.c b/auto-generated/gnu-api-tests/vle8ff.c index 102167c73..44b151ec7 100644 --- a/auto-generated/gnu-api-tests/vle8ff.c +++ b/auto-generated/gnu-api-tests/vle8ff.c @@ -6,116 +6,116 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vle8ff_v_i8mf8(const int8_t *base, size_t *new_vl, 
size_t vl) { - return __riscv_vle8ff_v_i8mf8(base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf8(rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf4(base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf4(rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf2(base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf2(rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m1(base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m1(rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m2(base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m2(rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m4(base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m4(rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m8(base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m8(rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf8(base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf8(rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf4(base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf4(rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf2(base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf2(rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m1(base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m1(rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m2(base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m2(rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m4(base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m4(rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m8(base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8(const uint8_t *rs1, size_t *new_vl, size_t vl) 
{ + return __riscv_vle8ff_v_u8m8(rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf8_m(mask, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf8_m(vm, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf4_m(mask, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf4_m(vm, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf2_m(mask, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf2_m(vm, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m1_m(mask, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m1_m(vm, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m2_m(mask, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m2_m(vm, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m4_m(mask, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m4_m(vm, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m8_m(mask, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m8_m(vm, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf8_m(mask, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf8_m(vm, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf4_m(mask, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf4_m(vm, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf2_m(mask, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf2_m(vm, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m1_m(mask, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m1_m(vm, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t 
vl) { - return __riscv_vle8ff_v_u8m2_m(mask, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m2_m(vm, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m4_m(mask, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m4_m(vm, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m8_m(mask, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m8_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8ff\.[ivxfswum.]+\s+} 28 } } */ diff --git a/auto-generated/gnu-api-tests/vlm.c b/auto-generated/gnu-api-tests/vlm.c index f3151ad9c..3cf01d59c 100644 --- a/auto-generated/gnu-api-tests/vlm.c +++ b/auto-generated/gnu-api-tests/vlm.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vlm_v_b1(const uint8_t *base, size_t vl) { - return __riscv_vlm_v_b1(base, vl); +vbool1_t test_vlm_v_b1(const uint8_t *rs1, size_t vl) { + return __riscv_vlm_v_b1(rs1, vl); } -vbool2_t test_vlm_v_b2(const uint8_t *base, size_t vl) { - return __riscv_vlm_v_b2(base, vl); +vbool2_t test_vlm_v_b2(const uint8_t *rs1, size_t vl) { + return __riscv_vlm_v_b2(rs1, vl); } -vbool4_t test_vlm_v_b4(const uint8_t *base, size_t vl) { - return __riscv_vlm_v_b4(base, vl); +vbool4_t test_vlm_v_b4(const uint8_t *rs1, size_t vl) { + return __riscv_vlm_v_b4(rs1, vl); } -vbool8_t test_vlm_v_b8(const uint8_t *base, size_t vl) { - return __riscv_vlm_v_b8(base, vl); +vbool8_t test_vlm_v_b8(const uint8_t *rs1, size_t vl) { + return __riscv_vlm_v_b8(rs1, vl); } -vbool16_t test_vlm_v_b16(const uint8_t *base, size_t vl) { - return __riscv_vlm_v_b16(base, vl); +vbool16_t test_vlm_v_b16(const uint8_t *rs1, size_t vl) { + return __riscv_vlm_v_b16(rs1, vl); } -vbool32_t test_vlm_v_b32(const uint8_t *base, size_t vl) { - return __riscv_vlm_v_b32(base, vl); +vbool32_t test_vlm_v_b32(const uint8_t *rs1, size_t vl) { + return __riscv_vlm_v_b32(rs1, vl); } -vbool64_t test_vlm_v_b64(const uint8_t *base, size_t vl) { - return __riscv_vlm_v_b64(base, vl); +vbool64_t test_vlm_v_b64(const uint8_t *rs1, size_t vl) { + return __riscv_vlm_v_b64(rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlm\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vlmul_ext_v.c b/auto-generated/gnu-api-tests/vlmul_ext_v.c index 57f31b830..fbf91b340 100644 --- a/auto-generated/gnu-api-tests/vlmul_ext_v.c +++ b/auto-generated/gnu-api-tests/vlmul_ext_v.c @@ -6,544 +6,544 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_v_f16mf4_f16mf2(op1); +vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t value) { + return __riscv_vlmul_ext_v_f16mf4_f16mf2(value); } -vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_v_f16mf4_f16m1(op1); +vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t value) { + return 
__riscv_vlmul_ext_v_f16mf4_f16m1(value); } -vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_v_f16mf4_f16m2(op1); +vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t value) { + return __riscv_vlmul_ext_v_f16mf4_f16m2(value); } -vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_v_f16mf4_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t value) { + return __riscv_vlmul_ext_v_f16mf4_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_v_f16mf4_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t value) { + return __riscv_vlmul_ext_v_f16mf4_f16m8(value); } -vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_v_f16mf2_f16m1(op1); +vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t value) { + return __riscv_vlmul_ext_v_f16mf2_f16m1(value); } -vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_v_f16mf2_f16m2(op1); +vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t value) { + return __riscv_vlmul_ext_v_f16mf2_f16m2(value); } -vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_v_f16mf2_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t value) { + return __riscv_vlmul_ext_v_f16mf2_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_v_f16mf2_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t value) { + return __riscv_vlmul_ext_v_f16mf2_f16m8(value); } -vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) { - return __riscv_vlmul_ext_v_f16m1_f16m2(op1); +vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t value) { + return __riscv_vlmul_ext_v_f16m1_f16m2(value); } -vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) { - return __riscv_vlmul_ext_v_f16m1_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t value) { + return __riscv_vlmul_ext_v_f16m1_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t op1) { - return __riscv_vlmul_ext_v_f16m1_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t value) { + return __riscv_vlmul_ext_v_f16m1_f16m8(value); } -vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t op1) { - return __riscv_vlmul_ext_v_f16m2_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t value) { + return __riscv_vlmul_ext_v_f16m2_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) { - return __riscv_vlmul_ext_v_f16m2_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t value) { + return __riscv_vlmul_ext_v_f16m2_f16m8(value); } -vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) { - return __riscv_vlmul_ext_v_f16m4_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t value) { + return __riscv_vlmul_ext_v_f16m4_f16m8(value); } -vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) { - return __riscv_vlmul_ext_v_f32mf2_f32m1(op1); +vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t value) { + return __riscv_vlmul_ext_v_f32mf2_f32m1(value); } -vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) { - return __riscv_vlmul_ext_v_f32mf2_f32m2(op1); +vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t value) { + return __riscv_vlmul_ext_v_f32mf2_f32m2(value); } -vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) { - 
return __riscv_vlmul_ext_v_f32mf2_f32m4(op1); +vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t value) { + return __riscv_vlmul_ext_v_f32mf2_f32m4(value); } -vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) { - return __riscv_vlmul_ext_v_f32mf2_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t value) { + return __riscv_vlmul_ext_v_f32mf2_f32m8(value); } -vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) { - return __riscv_vlmul_ext_v_f32m1_f32m2(op1); +vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t value) { + return __riscv_vlmul_ext_v_f32m1_f32m2(value); } -vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) { - return __riscv_vlmul_ext_v_f32m1_f32m4(op1); +vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t value) { + return __riscv_vlmul_ext_v_f32m1_f32m4(value); } -vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) { - return __riscv_vlmul_ext_v_f32m1_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t value) { + return __riscv_vlmul_ext_v_f32m1_f32m8(value); } -vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) { - return __riscv_vlmul_ext_v_f32m2_f32m4(op1); +vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t value) { + return __riscv_vlmul_ext_v_f32m2_f32m4(value); } -vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) { - return __riscv_vlmul_ext_v_f32m2_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t value) { + return __riscv_vlmul_ext_v_f32m2_f32m8(value); } -vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) { - return __riscv_vlmul_ext_v_f32m4_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t value) { + return __riscv_vlmul_ext_v_f32m4_f32m8(value); } -vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) { - return __riscv_vlmul_ext_v_f64m1_f64m2(op1); +vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t value) { + return __riscv_vlmul_ext_v_f64m1_f64m2(value); } -vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) { - return __riscv_vlmul_ext_v_f64m1_f64m4(op1); +vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t value) { + return __riscv_vlmul_ext_v_f64m1_f64m4(value); } -vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) { - return __riscv_vlmul_ext_v_f64m1_f64m8(op1); +vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t value) { + return __riscv_vlmul_ext_v_f64m1_f64m8(value); } -vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) { - return __riscv_vlmul_ext_v_f64m2_f64m4(op1); +vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t value) { + return __riscv_vlmul_ext_v_f64m2_f64m4(value); } -vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) { - return __riscv_vlmul_ext_v_f64m2_f64m8(op1); +vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t value) { + return __riscv_vlmul_ext_v_f64m2_f64m8(value); } -vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) { - return __riscv_vlmul_ext_v_f64m4_f64m8(op1); +vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t value) { + return __riscv_vlmul_ext_v_f64m4_f64m8(value); } -vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { - return __riscv_vlmul_ext_v_i8mf8_i8mf4(op1); +vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t value) { + return __riscv_vlmul_ext_v_i8mf8_i8mf4(value); } -vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { - return __riscv_vlmul_ext_v_i8mf8_i8mf2(op1); +vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t value) { + return __riscv_vlmul_ext_v_i8mf8_i8mf2(value); } 
-vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { - return __riscv_vlmul_ext_v_i8mf8_i8m1(op1); +vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t value) { + return __riscv_vlmul_ext_v_i8mf8_i8m1(value); } -vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { - return __riscv_vlmul_ext_v_i8mf8_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t value) { + return __riscv_vlmul_ext_v_i8mf8_i8m2(value); } -vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { - return __riscv_vlmul_ext_v_i8mf8_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t value) { + return __riscv_vlmul_ext_v_i8mf8_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { - return __riscv_vlmul_ext_v_i8mf8_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t value) { + return __riscv_vlmul_ext_v_i8mf8_i8m8(value); } -vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { - return __riscv_vlmul_ext_v_i8mf4_i8mf2(op1); +vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t value) { + return __riscv_vlmul_ext_v_i8mf4_i8mf2(value); } -vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { - return __riscv_vlmul_ext_v_i8mf4_i8m1(op1); +vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t value) { + return __riscv_vlmul_ext_v_i8mf4_i8m1(value); } -vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { - return __riscv_vlmul_ext_v_i8mf4_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t value) { + return __riscv_vlmul_ext_v_i8mf4_i8m2(value); } -vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { - return __riscv_vlmul_ext_v_i8mf4_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t value) { + return __riscv_vlmul_ext_v_i8mf4_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { - return __riscv_vlmul_ext_v_i8mf4_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t value) { + return __riscv_vlmul_ext_v_i8mf4_i8m8(value); } -vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { - return __riscv_vlmul_ext_v_i8mf2_i8m1(op1); +vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t value) { + return __riscv_vlmul_ext_v_i8mf2_i8m1(value); } -vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { - return __riscv_vlmul_ext_v_i8mf2_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t value) { + return __riscv_vlmul_ext_v_i8mf2_i8m2(value); } -vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { - return __riscv_vlmul_ext_v_i8mf2_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t value) { + return __riscv_vlmul_ext_v_i8mf2_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { - return __riscv_vlmul_ext_v_i8mf2_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t value) { + return __riscv_vlmul_ext_v_i8mf2_i8m8(value); } -vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { - return __riscv_vlmul_ext_v_i8m1_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t value) { + return __riscv_vlmul_ext_v_i8m1_i8m2(value); } -vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { - return __riscv_vlmul_ext_v_i8m1_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t value) { + return __riscv_vlmul_ext_v_i8m1_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { - return __riscv_vlmul_ext_v_i8m1_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t value) { + return __riscv_vlmul_ext_v_i8m1_i8m8(value); } -vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { - return __riscv_vlmul_ext_v_i8m2_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t value) { + return 
__riscv_vlmul_ext_v_i8m2_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { - return __riscv_vlmul_ext_v_i8m2_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t value) { + return __riscv_vlmul_ext_v_i8m2_i8m8(value); } -vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { - return __riscv_vlmul_ext_v_i8m4_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t value) { + return __riscv_vlmul_ext_v_i8m4_i8m8(value); } -vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { - return __riscv_vlmul_ext_v_i16mf4_i16mf2(op1); +vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t value) { + return __riscv_vlmul_ext_v_i16mf4_i16mf2(value); } -vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { - return __riscv_vlmul_ext_v_i16mf4_i16m1(op1); +vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t value) { + return __riscv_vlmul_ext_v_i16mf4_i16m1(value); } -vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { - return __riscv_vlmul_ext_v_i16mf4_i16m2(op1); +vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t value) { + return __riscv_vlmul_ext_v_i16mf4_i16m2(value); } -vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { - return __riscv_vlmul_ext_v_i16mf4_i16m4(op1); +vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t value) { + return __riscv_vlmul_ext_v_i16mf4_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { - return __riscv_vlmul_ext_v_i16mf4_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t value) { + return __riscv_vlmul_ext_v_i16mf4_i16m8(value); } -vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { - return __riscv_vlmul_ext_v_i16mf2_i16m1(op1); +vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t value) { + return __riscv_vlmul_ext_v_i16mf2_i16m1(value); } -vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { - return __riscv_vlmul_ext_v_i16mf2_i16m2(op1); +vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t value) { + return __riscv_vlmul_ext_v_i16mf2_i16m2(value); } -vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { - return __riscv_vlmul_ext_v_i16mf2_i16m4(op1); +vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t value) { + return __riscv_vlmul_ext_v_i16mf2_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { - return __riscv_vlmul_ext_v_i16mf2_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t value) { + return __riscv_vlmul_ext_v_i16mf2_i16m8(value); } -vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { - return __riscv_vlmul_ext_v_i16m1_i16m2(op1); +vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t value) { + return __riscv_vlmul_ext_v_i16m1_i16m2(value); } -vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { - return __riscv_vlmul_ext_v_i16m1_i16m4(op1); +vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t value) { + return __riscv_vlmul_ext_v_i16m1_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { - return __riscv_vlmul_ext_v_i16m1_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t value) { + return __riscv_vlmul_ext_v_i16m1_i16m8(value); } -vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { - return __riscv_vlmul_ext_v_i16m2_i16m4(op1); +vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t value) { + return __riscv_vlmul_ext_v_i16m2_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { - return __riscv_vlmul_ext_v_i16m2_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t value) { + return 
__riscv_vlmul_ext_v_i16m2_i16m8(value); } -vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { - return __riscv_vlmul_ext_v_i16m4_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t value) { + return __riscv_vlmul_ext_v_i16m4_i16m8(value); } -vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { - return __riscv_vlmul_ext_v_i32mf2_i32m1(op1); +vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t value) { + return __riscv_vlmul_ext_v_i32mf2_i32m1(value); } -vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { - return __riscv_vlmul_ext_v_i32mf2_i32m2(op1); +vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t value) { + return __riscv_vlmul_ext_v_i32mf2_i32m2(value); } -vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { - return __riscv_vlmul_ext_v_i32mf2_i32m4(op1); +vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t value) { + return __riscv_vlmul_ext_v_i32mf2_i32m4(value); } -vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { - return __riscv_vlmul_ext_v_i32mf2_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t value) { + return __riscv_vlmul_ext_v_i32mf2_i32m8(value); } -vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { - return __riscv_vlmul_ext_v_i32m1_i32m2(op1); +vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t value) { + return __riscv_vlmul_ext_v_i32m1_i32m2(value); } -vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { - return __riscv_vlmul_ext_v_i32m1_i32m4(op1); +vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t value) { + return __riscv_vlmul_ext_v_i32m1_i32m4(value); } -vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { - return __riscv_vlmul_ext_v_i32m1_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t value) { + return __riscv_vlmul_ext_v_i32m1_i32m8(value); } -vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { - return __riscv_vlmul_ext_v_i32m2_i32m4(op1); +vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t value) { + return __riscv_vlmul_ext_v_i32m2_i32m4(value); } -vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { - return __riscv_vlmul_ext_v_i32m2_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t value) { + return __riscv_vlmul_ext_v_i32m2_i32m8(value); } -vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { - return __riscv_vlmul_ext_v_i32m4_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t value) { + return __riscv_vlmul_ext_v_i32m4_i32m8(value); } -vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { - return __riscv_vlmul_ext_v_i64m1_i64m2(op1); +vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t value) { + return __riscv_vlmul_ext_v_i64m1_i64m2(value); } -vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { - return __riscv_vlmul_ext_v_i64m1_i64m4(op1); +vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t value) { + return __riscv_vlmul_ext_v_i64m1_i64m4(value); } -vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { - return __riscv_vlmul_ext_v_i64m1_i64m8(op1); +vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t value) { + return __riscv_vlmul_ext_v_i64m1_i64m8(value); } -vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { - return __riscv_vlmul_ext_v_i64m2_i64m4(op1); +vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t value) { + return __riscv_vlmul_ext_v_i64m2_i64m4(value); } -vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { - return __riscv_vlmul_ext_v_i64m2_i64m8(op1); +vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t value) { + return __riscv_vlmul_ext_v_i64m2_i64m8(value); } 
-vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { - return __riscv_vlmul_ext_v_i64m4_i64m8(op1); +vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t value) { + return __riscv_vlmul_ext_v_i64m4_i64m8(value); } -vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { - return __riscv_vlmul_ext_v_u8mf8_u8mf4(op1); +vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t value) { + return __riscv_vlmul_ext_v_u8mf8_u8mf4(value); } -vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { - return __riscv_vlmul_ext_v_u8mf8_u8mf2(op1); +vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t value) { + return __riscv_vlmul_ext_v_u8mf8_u8mf2(value); } -vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { - return __riscv_vlmul_ext_v_u8mf8_u8m1(op1); +vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t value) { + return __riscv_vlmul_ext_v_u8mf8_u8m1(value); } -vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { - return __riscv_vlmul_ext_v_u8mf8_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t value) { + return __riscv_vlmul_ext_v_u8mf8_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { - return __riscv_vlmul_ext_v_u8mf8_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t value) { + return __riscv_vlmul_ext_v_u8mf8_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { - return __riscv_vlmul_ext_v_u8mf8_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t value) { + return __riscv_vlmul_ext_v_u8mf8_u8m8(value); } -vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { - return __riscv_vlmul_ext_v_u8mf4_u8mf2(op1); +vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t value) { + return __riscv_vlmul_ext_v_u8mf4_u8mf2(value); } -vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { - return __riscv_vlmul_ext_v_u8mf4_u8m1(op1); +vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t value) { + return __riscv_vlmul_ext_v_u8mf4_u8m1(value); } -vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { - return __riscv_vlmul_ext_v_u8mf4_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t value) { + return __riscv_vlmul_ext_v_u8mf4_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { - return __riscv_vlmul_ext_v_u8mf4_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t value) { + return __riscv_vlmul_ext_v_u8mf4_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { - return __riscv_vlmul_ext_v_u8mf4_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t value) { + return __riscv_vlmul_ext_v_u8mf4_u8m8(value); } -vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { - return __riscv_vlmul_ext_v_u8mf2_u8m1(op1); +vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t value) { + return __riscv_vlmul_ext_v_u8mf2_u8m1(value); } -vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { - return __riscv_vlmul_ext_v_u8mf2_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t value) { + return __riscv_vlmul_ext_v_u8mf2_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { - return __riscv_vlmul_ext_v_u8mf2_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t value) { + return __riscv_vlmul_ext_v_u8mf2_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { - return __riscv_vlmul_ext_v_u8mf2_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t value) { + return __riscv_vlmul_ext_v_u8mf2_u8m8(value); } -vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { - return 
__riscv_vlmul_ext_v_u8m1_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t value) { + return __riscv_vlmul_ext_v_u8m1_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { - return __riscv_vlmul_ext_v_u8m1_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t value) { + return __riscv_vlmul_ext_v_u8m1_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { - return __riscv_vlmul_ext_v_u8m1_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t value) { + return __riscv_vlmul_ext_v_u8m1_u8m8(value); } -vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { - return __riscv_vlmul_ext_v_u8m2_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t value) { + return __riscv_vlmul_ext_v_u8m2_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { - return __riscv_vlmul_ext_v_u8m2_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t value) { + return __riscv_vlmul_ext_v_u8m2_u8m8(value); } -vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { - return __riscv_vlmul_ext_v_u8m4_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t value) { + return __riscv_vlmul_ext_v_u8m4_u8m8(value); } -vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { - return __riscv_vlmul_ext_v_u16mf4_u16mf2(op1); +vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t value) { + return __riscv_vlmul_ext_v_u16mf4_u16mf2(value); } -vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { - return __riscv_vlmul_ext_v_u16mf4_u16m1(op1); +vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t value) { + return __riscv_vlmul_ext_v_u16mf4_u16m1(value); } -vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { - return __riscv_vlmul_ext_v_u16mf4_u16m2(op1); +vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t value) { + return __riscv_vlmul_ext_v_u16mf4_u16m2(value); } -vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { - return __riscv_vlmul_ext_v_u16mf4_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t value) { + return __riscv_vlmul_ext_v_u16mf4_u16m4(value); } -vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { - return __riscv_vlmul_ext_v_u16mf4_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t value) { + return __riscv_vlmul_ext_v_u16mf4_u16m8(value); } -vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { - return __riscv_vlmul_ext_v_u16mf2_u16m1(op1); +vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t value) { + return __riscv_vlmul_ext_v_u16mf2_u16m1(value); } -vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { - return __riscv_vlmul_ext_v_u16mf2_u16m2(op1); +vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t value) { + return __riscv_vlmul_ext_v_u16mf2_u16m2(value); } -vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { - return __riscv_vlmul_ext_v_u16mf2_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t value) { + return __riscv_vlmul_ext_v_u16mf2_u16m4(value); } -vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { - return __riscv_vlmul_ext_v_u16mf2_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t value) { + return __riscv_vlmul_ext_v_u16mf2_u16m8(value); } -vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { - return __riscv_vlmul_ext_v_u16m1_u16m2(op1); +vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t value) { + return __riscv_vlmul_ext_v_u16m1_u16m2(value); } -vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { - return 
__riscv_vlmul_ext_v_u16m1_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t value) { + return __riscv_vlmul_ext_v_u16m1_u16m4(value); } -vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { - return __riscv_vlmul_ext_v_u16m1_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t value) { + return __riscv_vlmul_ext_v_u16m1_u16m8(value); } -vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { - return __riscv_vlmul_ext_v_u16m2_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t value) { + return __riscv_vlmul_ext_v_u16m2_u16m4(value); } -vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { - return __riscv_vlmul_ext_v_u16m2_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t value) { + return __riscv_vlmul_ext_v_u16m2_u16m8(value); } -vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { - return __riscv_vlmul_ext_v_u16m4_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t value) { + return __riscv_vlmul_ext_v_u16m4_u16m8(value); } -vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { - return __riscv_vlmul_ext_v_u32mf2_u32m1(op1); +vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t value) { + return __riscv_vlmul_ext_v_u32mf2_u32m1(value); } -vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { - return __riscv_vlmul_ext_v_u32mf2_u32m2(op1); +vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t value) { + return __riscv_vlmul_ext_v_u32mf2_u32m2(value); } -vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { - return __riscv_vlmul_ext_v_u32mf2_u32m4(op1); +vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t value) { + return __riscv_vlmul_ext_v_u32mf2_u32m4(value); } -vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { - return __riscv_vlmul_ext_v_u32mf2_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t value) { + return __riscv_vlmul_ext_v_u32mf2_u32m8(value); } -vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { - return __riscv_vlmul_ext_v_u32m1_u32m2(op1); +vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t value) { + return __riscv_vlmul_ext_v_u32m1_u32m2(value); } -vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { - return __riscv_vlmul_ext_v_u32m1_u32m4(op1); +vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t value) { + return __riscv_vlmul_ext_v_u32m1_u32m4(value); } -vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) { - return __riscv_vlmul_ext_v_u32m1_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t value) { + return __riscv_vlmul_ext_v_u32m1_u32m8(value); } -vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { - return __riscv_vlmul_ext_v_u32m2_u32m4(op1); +vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t value) { + return __riscv_vlmul_ext_v_u32m2_u32m4(value); } -vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { - return __riscv_vlmul_ext_v_u32m2_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t value) { + return __riscv_vlmul_ext_v_u32m2_u32m8(value); } -vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { - return __riscv_vlmul_ext_v_u32m4_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t value) { + return __riscv_vlmul_ext_v_u32m4_u32m8(value); } -vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { - return __riscv_vlmul_ext_v_u64m1_u64m2(op1); +vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t value) { + return __riscv_vlmul_ext_v_u64m1_u64m2(value); } -vuint64m4_t 
test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { - return __riscv_vlmul_ext_v_u64m1_u64m4(op1); +vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t value) { + return __riscv_vlmul_ext_v_u64m1_u64m4(value); } -vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { - return __riscv_vlmul_ext_v_u64m1_u64m8(op1); +vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t value) { + return __riscv_vlmul_ext_v_u64m1_u64m8(value); } -vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { - return __riscv_vlmul_ext_v_u64m2_u64m4(op1); +vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t value) { + return __riscv_vlmul_ext_v_u64m2_u64m4(value); } -vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { - return __riscv_vlmul_ext_v_u64m2_u64m8(op1); +vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t value) { + return __riscv_vlmul_ext_v_u64m2_u64m8(value); } -vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) { - return __riscv_vlmul_ext_v_u64m4_u64m8(op1); +vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t value) { + return __riscv_vlmul_ext_v_u64m4_u64m8(value); } /* { dg-final { scan-assembler-times {vs[1248e][r123468]+\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 135 } } */ diff --git a/auto-generated/gnu-api-tests/vlmul_trunc_v.c b/auto-generated/gnu-api-tests/vlmul_trunc_v.c index c8be2f11f..ace3dc0cb 100644 --- a/auto-generated/gnu-api-tests/vlmul_trunc_v.c +++ b/auto-generated/gnu-api-tests/vlmul_trunc_v.c @@ -6,544 +6,544 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { - return __riscv_vlmul_trunc_v_f16mf2_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t value) { + return __riscv_vlmul_trunc_v_f16mf2_f16mf4(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_v_f16m1_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t value) { + return __riscv_vlmul_trunc_v_f16m1_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_v_f16m1_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t value) { + return __riscv_vlmul_trunc_v_f16m1_f16mf2(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_v_f16m2_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t value) { + return __riscv_vlmul_trunc_v_f16m2_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_v_f16m2_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t value) { + return __riscv_vlmul_trunc_v_f16m2_f16mf2(value); } -vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_v_f16m2_f16m1(op1); +vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t value) { + return __riscv_vlmul_trunc_v_f16m2_f16m1(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_v_f16m4_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t value) { + return __riscv_vlmul_trunc_v_f16m4_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_v_f16m4_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t value) { + return __riscv_vlmul_trunc_v_f16m4_f16mf2(value); } -vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { - return 
__riscv_vlmul_trunc_v_f16m4_f16m1(op1); +vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t value) { + return __riscv_vlmul_trunc_v_f16m4_f16m1(value); } -vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_v_f16m4_f16m2(op1); +vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t value) { + return __riscv_vlmul_trunc_v_f16m4_f16m2(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t value) { + return __riscv_vlmul_trunc_v_f16m8_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t value) { + return __riscv_vlmul_trunc_v_f16m8_f16mf2(value); } -vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16m1(op1); +vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t value) { + return __riscv_vlmul_trunc_v_f16m8_f16m1(value); } -vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16m2(op1); +vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t value) { + return __riscv_vlmul_trunc_v_f16m8_f16m2(value); } -vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16m4(op1); +vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t value) { + return __riscv_vlmul_trunc_v_f16m8_f16m4(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { - return __riscv_vlmul_trunc_v_f32m1_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t value) { + return __riscv_vlmul_trunc_v_f32m1_f32mf2(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_v_f32m2_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t value) { + return __riscv_vlmul_trunc_v_f32m2_f32mf2(value); } -vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_v_f32m2_f32m1(op1); +vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t value) { + return __riscv_vlmul_trunc_v_f32m2_f32m1(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_v_f32m4_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t value) { + return __riscv_vlmul_trunc_v_f32m4_f32mf2(value); } -vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_v_f32m4_f32m1(op1); +vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t value) { + return __riscv_vlmul_trunc_v_f32m4_f32m1(value); } -vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_v_f32m4_f32m2(op1); +vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t value) { + return __riscv_vlmul_trunc_v_f32m4_f32m2(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_v_f32m8_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t value) { + return __riscv_vlmul_trunc_v_f32m8_f32mf2(value); } -vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_v_f32m8_f32m1(op1); +vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t value) { + return __riscv_vlmul_trunc_v_f32m8_f32m1(value); } -vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { - return 
__riscv_vlmul_trunc_v_f32m8_f32m2(op1); +vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t value) { + return __riscv_vlmul_trunc_v_f32m8_f32m2(value); } -vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_v_f32m8_f32m4(op1); +vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t value) { + return __riscv_vlmul_trunc_v_f32m8_f32m4(value); } -vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { - return __riscv_vlmul_trunc_v_f64m2_f64m1(op1); +vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t value) { + return __riscv_vlmul_trunc_v_f64m2_f64m1(value); } -vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_v_f64m4_f64m1(op1); +vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t value) { + return __riscv_vlmul_trunc_v_f64m4_f64m1(value); } -vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_v_f64m4_f64m2(op1); +vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t value) { + return __riscv_vlmul_trunc_v_f64m4_f64m2(value); } -vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_v_f64m8_f64m1(op1); +vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t value) { + return __riscv_vlmul_trunc_v_f64m8_f64m1(value); } -vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_v_f64m8_f64m2(op1); +vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t value) { + return __riscv_vlmul_trunc_v_f64m8_f64m2(value); } -vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_v_f64m8_f64m4(op1); +vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t value) { + return __riscv_vlmul_trunc_v_f64m8_f64m4(value); } -vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { - return __riscv_vlmul_trunc_v_i8mf4_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t value) { + return __riscv_vlmul_trunc_v_i8mf4_i8mf8(value); } -vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { - return __riscv_vlmul_trunc_v_i8mf2_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t value) { + return __riscv_vlmul_trunc_v_i8mf2_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { - return __riscv_vlmul_trunc_v_i8mf2_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t value) { + return __riscv_vlmul_trunc_v_i8mf2_i8mf4(value); } -vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { - return __riscv_vlmul_trunc_v_i8m1_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t value) { + return __riscv_vlmul_trunc_v_i8m1_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { - return __riscv_vlmul_trunc_v_i8m1_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t value) { + return __riscv_vlmul_trunc_v_i8m1_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { - return __riscv_vlmul_trunc_v_i8m1_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t value) { + return __riscv_vlmul_trunc_v_i8m1_i8mf2(value); } -vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t value) { + return __riscv_vlmul_trunc_v_i8m2_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t value) { + return 
__riscv_vlmul_trunc_v_i8m2_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t value) { + return __riscv_vlmul_trunc_v_i8m2_i8mf2(value); } -vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8m1(op1); +vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t value) { + return __riscv_vlmul_trunc_v_i8m2_i8m1(value); } -vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t value) { + return __riscv_vlmul_trunc_v_i8m4_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t value) { + return __riscv_vlmul_trunc_v_i8m4_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t value) { + return __riscv_vlmul_trunc_v_i8m4_i8mf2(value); } -vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8m1(op1); +vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t value) { + return __riscv_vlmul_trunc_v_i8m4_i8m1(value); } -vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8m2(op1); +vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t value) { + return __riscv_vlmul_trunc_v_i8m4_i8m2(value); } -vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t value) { + return __riscv_vlmul_trunc_v_i8m8_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t value) { + return __riscv_vlmul_trunc_v_i8m8_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t value) { + return __riscv_vlmul_trunc_v_i8m8_i8mf2(value); } -vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8m1(op1); +vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t value) { + return __riscv_vlmul_trunc_v_i8m8_i8m1(value); } -vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8m2(op1); +vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t value) { + return __riscv_vlmul_trunc_v_i8m8_i8m2(value); } -vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8m4(op1); +vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t value) { + return __riscv_vlmul_trunc_v_i8m8_i8m4(value); } -vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { - return __riscv_vlmul_trunc_v_i16mf2_i16mf4(op1); +vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t value) { + return __riscv_vlmul_trunc_v_i16mf2_i16mf4(value); } -vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { - return __riscv_vlmul_trunc_v_i16m1_i16mf4(op1); +vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t value) { + return __riscv_vlmul_trunc_v_i16m1_i16mf4(value); } -vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { - return __riscv_vlmul_trunc_v_i16m1_i16mf2(op1); +vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t value) { + return 
__riscv_vlmul_trunc_v_i16m1_i16mf2(value);
 }

-vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
-  return __riscv_vlmul_trunc_v_i16m2_i16mf4(op1);
+vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t value) {
+  return __riscv_vlmul_trunc_v_i16m2_i16mf4(value);
 }

-vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
-  return __riscv_vlmul_trunc_v_i16m2_i16mf2(op1);
+vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t value) {
+  return __riscv_vlmul_trunc_v_i16m2_i16mf2(value);
 }

-vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
-  return __riscv_vlmul_trunc_v_i16m2_i16m1(op1);
+vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t value) {
+  return __riscv_vlmul_trunc_v_i16m2_i16m1(value);
 }

-vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_i16m4_i16mf4(op1);
+vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t value) {
+  return __riscv_vlmul_trunc_v_i16m4_i16mf4(value);
 }

-vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_i16m4_i16mf2(op1);
+vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t value) {
+  return __riscv_vlmul_trunc_v_i16m4_i16mf2(value);
 }

-vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_i16m4_i16m1(op1);
+vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t value) {
+  return __riscv_vlmul_trunc_v_i16m4_i16m1(value);
 }

-vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_i16m4_i16m2(op1);
+vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t value) {
+  return __riscv_vlmul_trunc_v_i16m4_i16m2(value);
 }

-vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_i16m8_i16mf4(op1);
+vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t value) {
+  return __riscv_vlmul_trunc_v_i16m8_i16mf4(value);
 }

-vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_i16m8_i16mf2(op1);
+vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t value) {
+  return __riscv_vlmul_trunc_v_i16m8_i16mf2(value);
 }

-vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_i16m8_i16m1(op1);
+vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t value) {
+  return __riscv_vlmul_trunc_v_i16m8_i16m1(value);
 }

-vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_i16m8_i16m2(op1);
+vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t value) {
+  return __riscv_vlmul_trunc_v_i16m8_i16m2(value);
 }

-vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_i16m8_i16m4(op1);
+vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t value) {
+  return __riscv_vlmul_trunc_v_i16m8_i16m4(value);
 }

-vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
-  return __riscv_vlmul_trunc_v_i32m1_i32mf2(op1);
+vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t value) {
+  return __riscv_vlmul_trunc_v_i32m1_i32mf2(value);
 }

-vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
-  return __riscv_vlmul_trunc_v_i32m2_i32mf2(op1);
+vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t value) {
+  return __riscv_vlmul_trunc_v_i32m2_i32mf2(value);
 }

-vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
-  return __riscv_vlmul_trunc_v_i32m2_i32m1(op1);
+vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t value) {
+  return __riscv_vlmul_trunc_v_i32m2_i32m1(value);
 }

-vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
-  return __riscv_vlmul_trunc_v_i32m4_i32mf2(op1);
+vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t value) {
+  return __riscv_vlmul_trunc_v_i32m4_i32mf2(value);
 }

-vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
-  return __riscv_vlmul_trunc_v_i32m4_i32m1(op1);
+vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t value) {
+  return __riscv_vlmul_trunc_v_i32m4_i32m1(value);
 }

-vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
-  return __riscv_vlmul_trunc_v_i32m4_i32m2(op1);
+vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t value) {
+  return __riscv_vlmul_trunc_v_i32m4_i32m2(value);
 }

-vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_i32m8_i32mf2(op1);
+vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t value) {
+  return __riscv_vlmul_trunc_v_i32m8_i32mf2(value);
 }

-vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_i32m8_i32m1(op1);
+vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t value) {
+  return __riscv_vlmul_trunc_v_i32m8_i32m1(value);
 }

-vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_i32m8_i32m2(op1);
+vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t value) {
+  return __riscv_vlmul_trunc_v_i32m8_i32m2(value);
 }

-vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_i32m8_i32m4(op1);
+vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t value) {
+  return __riscv_vlmul_trunc_v_i32m8_i32m4(value);
 }

-vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
-  return __riscv_vlmul_trunc_v_i64m2_i64m1(op1);
+vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t value) {
+  return __riscv_vlmul_trunc_v_i64m2_i64m1(value);
 }

-vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
-  return __riscv_vlmul_trunc_v_i64m4_i64m1(op1);
+vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t value) {
+  return __riscv_vlmul_trunc_v_i64m4_i64m1(value);
 }

-vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
-  return __riscv_vlmul_trunc_v_i64m4_i64m2(op1);
+vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t value) {
+  return __riscv_vlmul_trunc_v_i64m4_i64m2(value);
 }

-vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
-  return __riscv_vlmul_trunc_v_i64m8_i64m1(op1);
+vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t value) {
+  return __riscv_vlmul_trunc_v_i64m8_i64m1(value);
 }

-vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
-  return __riscv_vlmul_trunc_v_i64m8_i64m2(op1);
+vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t value) {
+  return __riscv_vlmul_trunc_v_i64m8_i64m2(value);
 }

-vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
-  return __riscv_vlmul_trunc_v_i64m8_i64m4(op1);
+vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t value) {
+  return __riscv_vlmul_trunc_v_i64m8_i64m4(value);
 }

-vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
-  return __riscv_vlmul_trunc_v_u8mf4_u8mf8(op1);
+vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t value) {
+  return __riscv_vlmul_trunc_v_u8mf4_u8mf8(value);
 }

-vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
-  return __riscv_vlmul_trunc_v_u8mf2_u8mf8(op1);
+vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t value) {
+  return __riscv_vlmul_trunc_v_u8mf2_u8mf8(value);
 }

-vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
-  return __riscv_vlmul_trunc_v_u8mf2_u8mf4(op1);
+vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t value) {
+  return __riscv_vlmul_trunc_v_u8mf2_u8mf4(value);
 }

-vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
-  return __riscv_vlmul_trunc_v_u8m1_u8mf8(op1);
+vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t value) {
+  return __riscv_vlmul_trunc_v_u8m1_u8mf8(value);
 }

-vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
-  return __riscv_vlmul_trunc_v_u8m1_u8mf4(op1);
+vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t value) {
+  return __riscv_vlmul_trunc_v_u8m1_u8mf4(value);
 }

-vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
-  return __riscv_vlmul_trunc_v_u8m1_u8mf2(op1);
+vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t value) {
+  return __riscv_vlmul_trunc_v_u8m1_u8mf2(value);
 }

-vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
-  return __riscv_vlmul_trunc_v_u8m2_u8mf8(op1);
+vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t value) {
+  return __riscv_vlmul_trunc_v_u8m2_u8mf8(value);
 }

-vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
-  return __riscv_vlmul_trunc_v_u8m2_u8mf4(op1);
+vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t value) {
+  return __riscv_vlmul_trunc_v_u8m2_u8mf4(value);
 }

-vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
-  return __riscv_vlmul_trunc_v_u8m2_u8mf2(op1);
+vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t value) {
+  return __riscv_vlmul_trunc_v_u8m2_u8mf2(value);
 }

-vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
-  return __riscv_vlmul_trunc_v_u8m2_u8m1(op1);
+vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t value) {
+  return __riscv_vlmul_trunc_v_u8m2_u8m1(value);
 }

-vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
-  return __riscv_vlmul_trunc_v_u8m4_u8mf8(op1);
+vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t value) {
+  return __riscv_vlmul_trunc_v_u8m4_u8mf8(value);
 }

-vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
-  return __riscv_vlmul_trunc_v_u8m4_u8mf4(op1);
+vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t value) {
+  return __riscv_vlmul_trunc_v_u8m4_u8mf4(value);
 }

-vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
-  return __riscv_vlmul_trunc_v_u8m4_u8mf2(op1);
+vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t value) {
+  return __riscv_vlmul_trunc_v_u8m4_u8mf2(value);
 }

-vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
-  return __riscv_vlmul_trunc_v_u8m4_u8m1(op1);
+vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t value) {
+  return __riscv_vlmul_trunc_v_u8m4_u8m1(value);
 }

-vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
-  return __riscv_vlmul_trunc_v_u8m4_u8m2(op1);
+vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t value) {
+  return __riscv_vlmul_trunc_v_u8m4_u8m2(value);
 }

-vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
-  return __riscv_vlmul_trunc_v_u8m8_u8mf8(op1);
+vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t value) {
+  return __riscv_vlmul_trunc_v_u8m8_u8mf8(value);
 }

-vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
-  return __riscv_vlmul_trunc_v_u8m8_u8mf4(op1);
+vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t value) {
+  return __riscv_vlmul_trunc_v_u8m8_u8mf4(value);
 }

-vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
-  return __riscv_vlmul_trunc_v_u8m8_u8mf2(op1);
+vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t value) {
+  return __riscv_vlmul_trunc_v_u8m8_u8mf2(value);
 }

-vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
-  return __riscv_vlmul_trunc_v_u8m8_u8m1(op1);
+vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t value) {
+  return __riscv_vlmul_trunc_v_u8m8_u8m1(value);
 }

-vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
-  return __riscv_vlmul_trunc_v_u8m8_u8m2(op1);
+vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t value) {
+  return __riscv_vlmul_trunc_v_u8m8_u8m2(value);
 }

-vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
-  return __riscv_vlmul_trunc_v_u8m8_u8m4(op1);
+vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t value) {
+  return __riscv_vlmul_trunc_v_u8m8_u8m4(value);
 }

-vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
-  return __riscv_vlmul_trunc_v_u16mf2_u16mf4(op1);
+vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t value) {
+  return __riscv_vlmul_trunc_v_u16mf2_u16mf4(value);
 }

-vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
-  return __riscv_vlmul_trunc_v_u16m1_u16mf4(op1);
+vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t value) {
+  return __riscv_vlmul_trunc_v_u16m1_u16mf4(value);
 }

-vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
-  return __riscv_vlmul_trunc_v_u16m1_u16mf2(op1);
+vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t value) {
+  return __riscv_vlmul_trunc_v_u16m1_u16mf2(value);
 }

-vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
-  return __riscv_vlmul_trunc_v_u16m2_u16mf4(op1);
+vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t value) {
+  return __riscv_vlmul_trunc_v_u16m2_u16mf4(value);
 }

-vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
-  return __riscv_vlmul_trunc_v_u16m2_u16mf2(op1);
+vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t value) {
+  return __riscv_vlmul_trunc_v_u16m2_u16mf2(value);
 }

-vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
-  return __riscv_vlmul_trunc_v_u16m2_u16m1(op1);
+vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t value) {
+  return __riscv_vlmul_trunc_v_u16m2_u16m1(value);
 }

-vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_u16m4_u16mf4(op1);
+vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t value) {
+  return __riscv_vlmul_trunc_v_u16m4_u16mf4(value);
 }

-vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_u16m4_u16mf2(op1);
+vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t value) {
+  return __riscv_vlmul_trunc_v_u16m4_u16mf2(value);
 }

-vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_u16m4_u16m1(op1);
+vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t value) {
+  return __riscv_vlmul_trunc_v_u16m4_u16m1(value);
 }

-vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
-  return __riscv_vlmul_trunc_v_u16m4_u16m2(op1);
+vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t value) {
+  return __riscv_vlmul_trunc_v_u16m4_u16m2(value);
 }

-vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_u16m8_u16mf4(op1);
+vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t value) {
+  return __riscv_vlmul_trunc_v_u16m8_u16mf4(value);
 }

-vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_u16m8_u16mf2(op1);
+vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t value) {
+  return __riscv_vlmul_trunc_v_u16m8_u16mf2(value);
 }

-vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_u16m8_u16m1(op1);
+vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t value) {
+  return __riscv_vlmul_trunc_v_u16m8_u16m1(value);
 }

-vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_u16m8_u16m2(op1);
+vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t value) {
+  return __riscv_vlmul_trunc_v_u16m8_u16m2(value);
 }

-vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
-  return __riscv_vlmul_trunc_v_u16m8_u16m4(op1);
+vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t value) {
+  return __riscv_vlmul_trunc_v_u16m8_u16m4(value);
 }

-vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
-  return __riscv_vlmul_trunc_v_u32m1_u32mf2(op1);
+vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t value) {
+  return __riscv_vlmul_trunc_v_u32m1_u32mf2(value);
 }

-vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
-  return __riscv_vlmul_trunc_v_u32m2_u32mf2(op1);
+vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t value) {
+  return __riscv_vlmul_trunc_v_u32m2_u32mf2(value);
 }

-vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
-  return __riscv_vlmul_trunc_v_u32m2_u32m1(op1);
+vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t value) {
+  return __riscv_vlmul_trunc_v_u32m2_u32m1(value);
 }

-vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
-  return __riscv_vlmul_trunc_v_u32m4_u32mf2(op1);
+vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t value) {
+  return __riscv_vlmul_trunc_v_u32m4_u32mf2(value);
 }

-vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
-  return __riscv_vlmul_trunc_v_u32m4_u32m1(op1);
+vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t value) {
+  return __riscv_vlmul_trunc_v_u32m4_u32m1(value);
 }

-vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
-  return __riscv_vlmul_trunc_v_u32m4_u32m2(op1);
+vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t value) {
+  return __riscv_vlmul_trunc_v_u32m4_u32m2(value);
 }

-vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_u32m8_u32mf2(op1);
+vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t value) {
+  return __riscv_vlmul_trunc_v_u32m8_u32mf2(value);
 }

-vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_u32m8_u32m1(op1);
+vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t value) {
+  return __riscv_vlmul_trunc_v_u32m8_u32m1(value);
 }

-vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_u32m8_u32m2(op1);
+vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t value) {
+  return __riscv_vlmul_trunc_v_u32m8_u32m2(value);
 }

-vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
-  return __riscv_vlmul_trunc_v_u32m8_u32m4(op1);
+vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t value) {
+  return __riscv_vlmul_trunc_v_u32m8_u32m4(value);
 }

-vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
-  return __riscv_vlmul_trunc_v_u64m2_u64m1(op1);
+vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t value) {
+  return __riscv_vlmul_trunc_v_u64m2_u64m1(value);
 }

-vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
-  return __riscv_vlmul_trunc_v_u64m4_u64m1(op1);
+vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t value) {
+  return __riscv_vlmul_trunc_v_u64m4_u64m1(value);
 }

-vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
-  return __riscv_vlmul_trunc_v_u64m4_u64m2(op1);
+vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t value) {
+  return __riscv_vlmul_trunc_v_u64m4_u64m2(value);
 }

-vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_v_u64m8_u64m1(op1);
+vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t value) {
+  return __riscv_vlmul_trunc_v_u64m8_u64m1(value);
 }

-vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_v_u64m8_u64m2(op1);
+vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t value) {
+  return __riscv_vlmul_trunc_v_u64m8_u64m2(value);
 }

-vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_v_u64m8_u64m4(op1);
+vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t value) {
+  return __riscv_vlmul_trunc_v_u64m8_u64m4(value);
 }

 /* { dg-final { scan-assembler-times {vs[1248e][r123468]+\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 135 } } */
diff --git a/auto-generated/gnu-api-tests/vloxei16.c b/auto-generated/gnu-api-tests/vloxei16.c
index 31d196053..efea62008 100644
--- a/auto-generated/gnu-api-tests/vloxei16.c
+++ b/auto-generated/gnu-api-tests/vloxei16.c
@@ -6,460 +6,460 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vloxei16_v_f16mf4(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16mf4(base, bindex, vl);
+vfloat16mf4_t test_vloxei16_v_f16mf4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16mf4(rs1, rs2, vl);
 }

-vfloat16mf2_t test_vloxei16_v_f16mf2(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16mf2(base, bindex, vl);
+vfloat16mf2_t test_vloxei16_v_f16mf2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16mf2(rs1, rs2, vl);
 }

-vfloat16m1_t test_vloxei16_v_f16m1(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m1(base, bindex, vl);
+vfloat16m1_t test_vloxei16_v_f16m1(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m1(rs1, rs2, vl);
 }

-vfloat16m2_t test_vloxei16_v_f16m2(const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m2(base, bindex, vl);
+vfloat16m2_t test_vloxei16_v_f16m2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m2(rs1, rs2, vl);
 }

-vfloat16m4_t test_vloxei16_v_f16m4(const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m4(base, bindex, vl);
+vfloat16m4_t test_vloxei16_v_f16m4(const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m4(rs1, rs2, vl);
 }

-vfloat16m8_t test_vloxei16_v_f16m8(const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m8(base, bindex, vl);
+vfloat16m8_t test_vloxei16_v_f16m8(const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m8(rs1, rs2, vl);
 }

-vfloat32mf2_t test_vloxei16_v_f32mf2(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32mf2(base, bindex, vl);
+vfloat32mf2_t test_vloxei16_v_f32mf2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32mf2(rs1, rs2, vl);
 }

-vfloat32m1_t test_vloxei16_v_f32m1(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m1(base, bindex, vl);
+vfloat32m1_t test_vloxei16_v_f32m1(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m1(rs1, rs2, vl);
 }

-vfloat32m2_t test_vloxei16_v_f32m2(const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m2(base, bindex, vl);
+vfloat32m2_t test_vloxei16_v_f32m2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m2(rs1, rs2, vl);
 }

-vfloat32m4_t test_vloxei16_v_f32m4(const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m4(base, bindex, vl);
+vfloat32m4_t test_vloxei16_v_f32m4(const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m4(rs1, rs2, vl);
 }

-vfloat32m8_t test_vloxei16_v_f32m8(const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m8(base, bindex, vl);
+vfloat32m8_t test_vloxei16_v_f32m8(const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m8(rs1, rs2, vl);
 }

-vfloat64m1_t test_vloxei16_v_f64m1(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m1(base, bindex, vl);
+vfloat64m1_t test_vloxei16_v_f64m1(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m1(rs1, rs2, vl);
 }

-vfloat64m2_t test_vloxei16_v_f64m2(const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m2(base, bindex, vl);
+vfloat64m2_t test_vloxei16_v_f64m2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m2(rs1, rs2, vl);
 }

-vfloat64m4_t test_vloxei16_v_f64m4(const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m4(base, bindex, vl);
+vfloat64m4_t test_vloxei16_v_f64m4(const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m4(rs1, rs2, vl);
 }

-vfloat64m8_t test_vloxei16_v_f64m8(const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m8(base, bindex, vl);
+vfloat64m8_t test_vloxei16_v_f64m8(const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m8(rs1, rs2, vl);
 }

-vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8mf8(base, bindex, vl);
+vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8mf8(rs1, rs2, vl);
 }

-vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8mf4(base, bindex, vl);
+vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8mf4(rs1, rs2, vl);
 }

-vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8mf2(base, bindex, vl);
+vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8mf2(rs1, rs2, vl);
 }

-vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8m1(base, bindex, vl);
+vint8m1_t test_vloxei16_v_i8m1(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8m1(rs1, rs2, vl);
 }

-vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8m2(base, bindex, vl);
+vint8m2_t test_vloxei16_v_i8m2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8m2(rs1, rs2, vl);
 }

-vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8m4(base, bindex, vl);
+vint8m4_t test_vloxei16_v_i8m4(const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8m4(rs1, rs2, vl);
 }

-vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16mf4(base, bindex, vl);
+vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16mf4(rs1, rs2, vl);
 }

-vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16mf2(base, bindex, vl);
+vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16mf2(rs1, rs2, vl);
 }

-vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m1(base, bindex, vl);
+vint16m1_t test_vloxei16_v_i16m1(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m1(rs1, rs2, vl);
 }

-vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m2(base, bindex, vl);
+vint16m2_t test_vloxei16_v_i16m2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m2(rs1, rs2, vl);
 }

-vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m4(base, bindex, vl);
+vint16m4_t test_vloxei16_v_i16m4(const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m4(rs1, rs2, vl);
 }

-vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m8(base, bindex, vl);
+vint16m8_t test_vloxei16_v_i16m8(const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m8(rs1, rs2, vl);
 }

-vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32mf2(base, bindex, vl);
+vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32mf2(rs1, rs2, vl);
 }

-vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m1(base, bindex, vl);
+vint32m1_t test_vloxei16_v_i32m1(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m1(rs1, rs2, vl);
 }

-vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m2(base, bindex, vl);
+vint32m2_t test_vloxei16_v_i32m2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m2(rs1, rs2, vl);
 }

-vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m4(base, bindex, vl);
+vint32m4_t test_vloxei16_v_i32m4(const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m4(rs1, rs2, vl);
 }

-vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m8(base, bindex, vl);
+vint32m8_t test_vloxei16_v_i32m8(const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m8(rs1, rs2, vl);
 }

-vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m1(base, bindex, vl);
+vint64m1_t test_vloxei16_v_i64m1(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m1(rs1, rs2, vl);
 }

-vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m2(base, bindex, vl);
+vint64m2_t test_vloxei16_v_i64m2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m2(rs1, rs2, vl);
 }

-vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m4(base, bindex, vl);
+vint64m4_t test_vloxei16_v_i64m4(const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m4(rs1, rs2, vl);
 }

-vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m8(base, bindex, vl);
+vint64m8_t test_vloxei16_v_i64m8(const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m8(rs1, rs2, vl);
 }

-vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8mf8(base, bindex, vl);
+vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8mf8(rs1, rs2, vl);
 }

-vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8mf4(base, bindex, vl);
+vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8mf4(rs1, rs2, vl);
 }

-vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8mf2(base, bindex, vl);
+vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8mf2(rs1, rs2, vl);
 }

-vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8m1(base, bindex, vl);
+vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8m1(rs1, rs2, vl);
 }

-vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8m2(base, bindex, vl);
+vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8m2(rs1, rs2, vl);
 }

-vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8m4(base, bindex, vl);
+vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8m4(rs1, rs2, vl);
 }

-vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16mf4(base, bindex, vl);
+vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16mf4(rs1, rs2, vl);
 }

-vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16mf2(base, bindex, vl);
+vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16mf2(rs1, rs2, vl);
 }

-vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m1(base, bindex, vl);
+vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m1(rs1, rs2, vl);
 }

-vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m2(base, bindex, vl);
+vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m2(rs1, rs2, vl);
 }

-vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m4(base, bindex, vl);
+vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m4(rs1, rs2, vl);
 }

-vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m8(base, bindex, vl);
+vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m8(rs1, rs2, vl);
 }

-vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32mf2(base, bindex, vl);
+vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32mf2(rs1, rs2, vl);
 }

-vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m1(base, bindex, vl);
+vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m1(rs1, rs2, vl);
 }

-vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m2(base, bindex, vl);
+vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m2(rs1, rs2, vl);
 }

-vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m4(base, bindex, vl);
+vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m4(rs1, rs2, vl);
 }

-vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m8(base, bindex, vl);
+vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m8(rs1, rs2, vl);
 }

-vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m1(base, bindex, vl);
+vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m1(rs1, rs2, vl);
 }

-vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m2(base, bindex, vl);
+vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m2(rs1, rs2, vl);
 }

-vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m4(base, bindex, vl);
+vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m4(rs1, rs2, vl);
 }

-vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m8(base, bindex, vl);
+vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m8(rs1, rs2, vl);
 }

-vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16mf4_m(mask, base, bindex, vl);
+vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16mf4_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16mf2_m(mask, base, bindex, vl);
+vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16mf2_m(vm, rs1, rs2, vl);
 }

-vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m1_m(mask, base, bindex, vl);
+vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m1_m(vm, rs1, rs2, vl);
 }

-vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m2_m(mask, base, bindex, vl);
+vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m2_m(vm, rs1, rs2, vl);
 }

-vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m4_m(mask, base, bindex, vl);
+vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m4_m(vm, rs1, rs2, vl);
 }

-vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f16m8_m(mask, base, bindex, vl);
+vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f16m8_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32mf2_m(mask, base, bindex, vl);
+vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32mf2_m(vm, rs1, rs2, vl);
 }

-vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m1_m(mask, base, bindex, vl);
+vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m1_m(vm, rs1, rs2, vl);
 }

-vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m2_m(mask, base, bindex, vl);
+vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m2_m(vm, rs1, rs2, vl);
 }

-vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m4_m(mask, base, bindex, vl);
+vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m4_m(vm, rs1, rs2, vl);
 }

-vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f32m8_m(mask, base, bindex, vl);
+vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f32m8_m(vm, rs1, rs2, vl);
 }

-vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m1_m(mask, base, bindex, vl);
+vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m1_m(vm, rs1, rs2, vl);
 }

-vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m2_m(mask, base, bindex, vl);
+vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m2_m(vm, rs1, rs2, vl);
 }

-vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m4_m(mask, base, bindex, vl);
+vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m4_m(vm, rs1, rs2, vl);
 }

-vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_f64m8_m(mask, base, bindex, vl);
+vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_f64m8_m(vm, rs1, rs2, vl);
 }

-vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8mf8_m(mask, base, bindex, vl);
+vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8mf8_m(vm, rs1, rs2, vl);
 }

-vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8mf4_m(mask, base, bindex, vl);
+vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8mf4_m(vm, rs1, rs2, vl);
 }

-vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8mf2_m(mask, base, bindex, vl);
+vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8mf2_m(vm, rs1, rs2, vl);
 }

-vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8m1_m(mask, base, bindex, vl);
+vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8m1_m(vm, rs1, rs2, vl);
 }

-vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8m2_m(mask, base, bindex, vl);
+vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8m2_m(vm, rs1, rs2, vl);
 }

-vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i8m4_m(mask, base, bindex, vl);
+vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i8m4_m(vm, rs1, rs2, vl);
 }

-vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16mf4_m(mask, base, bindex, vl);
+vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16mf4_m(vm, rs1, rs2, vl);
 }

-vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16mf2_m(mask, base, bindex, vl);
+vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16mf2_m(vm, rs1, rs2, vl);
 }

-vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m1_m(mask, base, bindex, vl);
+vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m1_m(vm, rs1, rs2, vl);
 }

-vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m2_m(mask, base, bindex, vl);
+vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m2_m(vm, rs1, rs2, vl);
 }

-vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m4_m(mask, base, bindex, vl);
+vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m4_m(vm, rs1, rs2, vl);
 }

-vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i16m8_m(mask, base, bindex, vl);
+vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i16m8_m(vm, rs1, rs2, vl);
 }

-vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32mf2_m(mask, base, bindex, vl);
+vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32mf2_m(vm, rs1, rs2, vl);
 }

-vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m1_m(mask, base, bindex, vl);
+vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m1_m(vm, rs1, rs2, vl);
 }

-vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m2_m(mask, base, bindex, vl);
+vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m2_m(vm, rs1, rs2, vl);
 }

-vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m4_m(mask, base, bindex, vl);
+vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m4_m(vm, rs1, rs2, vl);
 }

-vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i32m8_m(mask, base, bindex, vl);
+vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i32m8_m(vm, rs1, rs2, vl);
 }

-vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m1_m(mask, base, bindex, vl);
+vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m1_m(vm, rs1, rs2, vl);
 }

-vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m2_m(mask, base, bindex, vl);
+vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m2_m(vm, rs1, rs2, vl);
 }

-vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m4_m(mask, base, bindex, vl);
+vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m4_m(vm, rs1, rs2, vl);
 }

-vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_i64m8_m(mask, base, bindex, vl);
+vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_i64m8_m(vm, rs1, rs2, vl);
 }

-vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8mf8_m(mask, base, bindex, vl);
+vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8mf8_m(vm, rs1, rs2, vl);
 }

-vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8mf4_m(mask, base, bindex, vl);
+vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8mf4_m(vm, rs1, rs2, vl);
 }

-vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8mf2_m(mask, base, bindex, vl);
+vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8mf2_m(vm, rs1, rs2, vl);
 }

-vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8m1_m(mask, base, bindex, vl);
+vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8m1_m(vm, rs1, rs2, vl);
 }

-vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8m2_m(mask, base, bindex, vl);
+vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8m2_m(vm, rs1, rs2, vl);
 }

-vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u8m4_m(mask, base, bindex, vl);
+vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u8m4_m(vm, rs1, rs2, vl);
 }

-vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16mf4_m(mask, base, bindex, vl);
+vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16mf4_m(vm, rs1, rs2, vl);
 }

-vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16mf2_m(mask, base, bindex, vl);
+vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16mf2_m(vm, rs1, rs2, vl);
 }

-vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m1_m(mask, base, bindex, vl);
+vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m1_m(vm, rs1, rs2, vl);
 }

-vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m2_m(mask, base, bindex, vl);
+vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m2_m(vm, rs1, rs2, vl);
 }

-vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m4_m(mask, base, bindex, vl);
+vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m4_m(vm, rs1, rs2, vl);
 }

-vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u16m8_m(mask, base, bindex, vl);
+vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u16m8_m(vm, rs1, rs2, vl);
 }

-vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32mf2_m(mask, base, bindex, vl);
+vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32mf2_m(vm, rs1, rs2, vl);
 }

-vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m1_m(mask, base, bindex, vl);
+vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m1_m(vm, rs1, rs2, vl);
 }

-vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m2_m(mask, base, bindex, vl);
+vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m2_m(vm, rs1, rs2, vl);
 }

-vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m4_m(mask, base, bindex, vl);
+vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m4_m(vm, rs1, rs2, vl);
 }

-vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u32m8_m(mask, base, bindex, vl);
+vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u32m8_m(vm, rs1, rs2, vl);
 }

-vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m1_m(mask, base, bindex, vl);
+vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m1_m(vm, rs1, rs2, vl);
 }

-vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m2_m(mask, base, bindex, vl);
+vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m2_m(vm, rs1, rs2, vl);
 }

-vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m4_m(mask, base, bindex, vl);
+vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m4_m(vm, rs1, rs2, vl);
 }

-vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_v_u64m8_m(mask, base, bindex, vl);
+vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_u64m8_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei16\.[ivxfswum.]+\s+} 114 } } */
diff --git a/auto-generated/gnu-api-tests/vloxei32.c b/auto-generated/gnu-api-tests/vloxei32.c
index ed12f90d7..90d9650e3 100644
--- a/auto-generated/gnu-api-tests/vloxei32.c
+++ b/auto-generated/gnu-api-tests/vloxei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vloxei32_v_f16mf4(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16mf4(base, bindex, vl);
+vfloat16mf4_t test_vloxei32_v_f16mf4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16mf4(rs1, rs2, vl);
 }

-vfloat16mf2_t test_vloxei32_v_f16mf2(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16mf2(base, bindex, vl);
+vfloat16mf2_t test_vloxei32_v_f16mf2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16mf2(rs1, rs2, vl);
 }

-vfloat16m1_t test_vloxei32_v_f16m1(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16m1(base, bindex, vl);
+vfloat16m1_t test_vloxei32_v_f16m1(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16m1(rs1, rs2, vl);
 }

-vfloat16m2_t test_vloxei32_v_f16m2(const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16m2(base, bindex, vl);
+vfloat16m2_t test_vloxei32_v_f16m2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16m2(rs1, rs2, vl);
 }

-vfloat16m4_t test_vloxei32_v_f16m4(const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16m4(base, bindex, vl);
+vfloat16m4_t test_vloxei32_v_f16m4(const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16m4(rs1, rs2, vl);
 }

-vfloat32mf2_t test_vloxei32_v_f32mf2(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32mf2(base, bindex, vl);
+vfloat32mf2_t test_vloxei32_v_f32mf2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f32mf2(rs1, rs2, vl);
 }

-vfloat32m1_t test_vloxei32_v_f32m1(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32m1(base, bindex, vl);
+vfloat32m1_t test_vloxei32_v_f32m1(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f32m1(rs1, rs2, vl);
 }

-vfloat32m2_t test_vloxei32_v_f32m2(const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32m2(base, bindex, vl);
+vfloat32m2_t test_vloxei32_v_f32m2(const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f32m2(rs1, rs2, vl);
 }

-vfloat32m4_t test_vloxei32_v_f32m4(const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32m4(base, bindex, vl);
+vfloat32m4_t test_vloxei32_v_f32m4(const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f32m4(rs1, rs2, vl);
 }

-vfloat32m8_t test_vloxei32_v_f32m8(const float32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32m8(base, bindex, vl);
+vfloat32m8_t test_vloxei32_v_f32m8(const float32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f32m8(rs1, rs2, vl);
 }

-vfloat64m1_t test_vloxei32_v_f64m1(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f64m1(base, bindex, vl);
+vfloat64m1_t test_vloxei32_v_f64m1(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f64m1(rs1, rs2, vl);
 }

-vfloat64m2_t test_vloxei32_v_f64m2(const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f64m2(base, bindex, vl);
+vfloat64m2_t test_vloxei32_v_f64m2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f64m2(rs1, rs2, vl);
 }

-vfloat64m4_t test_vloxei32_v_f64m4(const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f64m4(base, bindex, vl);
+vfloat64m4_t test_vloxei32_v_f64m4(const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f64m4(rs1, rs2, vl);
 }

-vfloat64m8_t test_vloxei32_v_f64m8(const float64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f64m8(base, bindex, vl);
+vfloat64m8_t test_vloxei32_v_f64m8(const float64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f64m8(rs1, rs2, vl);
 }

-vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i8mf8(base, bindex, vl);
+vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i8mf8(rs1, rs2, vl);
 }

-vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i8mf4(base, bindex, vl);
+vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i8mf4(rs1, rs2, vl);
 }

-vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i8mf2(base, bindex, vl);
+vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i8mf2(rs1, rs2, vl);
 }

-vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i8m1(base, bindex, vl);
+vint8m1_t test_vloxei32_v_i8m1(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i8m1(rs1, rs2, vl);
 }

-vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i8m2(base, bindex, vl);
+vint8m2_t test_vloxei32_v_i8m2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i8m2(rs1, rs2, vl);
 }

-vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i16mf4(base, bindex, vl);
+vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i16mf4(rs1, rs2, vl);
 }

-vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i16mf2(base, bindex, vl);
+vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i16mf2(rs1, rs2, vl);
 }

-vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i16m1(base, bindex, vl);
+vint16m1_t test_vloxei32_v_i16m1(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i16m1(rs1, rs2, vl);
 }

-vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i16m2(base, bindex, vl);
+vint16m2_t test_vloxei32_v_i16m2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i16m2(rs1, rs2, vl);
 }

-vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i16m4(base, bindex, vl);
+vint16m4_t test_vloxei32_v_i16m4(const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i16m4(rs1, rs2, vl);
 }

-vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i32mf2(base, bindex, vl);
+vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i32mf2(rs1, rs2, vl);
 }

-vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i32m1(base, bindex, vl);
+vint32m1_t test_vloxei32_v_i32m1(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i32m1(rs1, rs2, vl);
 }

-vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i32m2(base, bindex, vl);
+vint32m2_t test_vloxei32_v_i32m2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i32m2(rs1, rs2, vl);
 }

-vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i32m4(base, bindex, vl);
+vint32m4_t test_vloxei32_v_i32m4(const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i32m4(rs1, rs2, vl);
 }

-vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i32m8(base, bindex, vl);
+vint32m8_t test_vloxei32_v_i32m8(const int32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i32m8(rs1, rs2, vl);
 }

-vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i64m1(base, bindex, vl);
+vint64m1_t test_vloxei32_v_i64m1(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i64m1(rs1, rs2, vl);
 }

-vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i64m2(base, bindex, vl);
+vint64m2_t test_vloxei32_v_i64m2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i64m2(rs1, rs2, vl);
 }

-vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i64m4(base, bindex, vl);
+vint64m4_t test_vloxei32_v_i64m4(const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i64m4(rs1, rs2, vl);
 }

-vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_i64m8(base, bindex, vl);
+vint64m8_t test_vloxei32_v_i64m8(const int64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_i64m8(rs1, rs2, vl);
 }

-vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u8mf8(base, bindex, vl);
+vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u8mf8(rs1, rs2, vl);
 }

-vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u8mf4(base, bindex, vl);
+vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u8mf4(rs1, rs2, vl);
 }

-vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u8mf2(base, bindex, vl);
+vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u8mf2(rs1, rs2, vl);
 }

-vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u8m1(base, bindex, vl);
+vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u8m1(rs1, rs2, vl);
 }

-vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u8m2(base, bindex, vl);
+vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u8m2(rs1, rs2, vl);
 }

-vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16mf4(base, bindex, vl);
+vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16mf4(rs1, rs2, vl);
 }

-vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16mf2(base, bindex, vl);
+vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16mf2(rs1, rs2, vl);
 }

-vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16m1(base, bindex, vl);
+vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16m1(rs1, rs2, vl);
 }

-vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16m2(base, bindex, vl);
+vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16m2(rs1, rs2, vl);
 }

-vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16m4(base, bindex, vl);
+vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16m4(rs1, rs2, vl);
 }

-vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32mf2(base, bindex, vl);
+vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32mf2(rs1, rs2, vl);
 }

-vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m1(base, bindex, vl);
+vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m1(rs1, rs2, vl);
 }

-vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m2(base, bindex, vl);
+vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m2(rs1, rs2, vl);
 }

-vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m4(base, bindex, vl);
+vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m4(rs1, rs2, vl);
 }

-vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m8(base, bindex, vl);
+vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m8(rs1, rs2, vl);
 }

-vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m1(base, bindex, vl);
+vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m1(rs1, rs2, vl);
 }

-vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m2(base, bindex, vl);
+vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m2(rs1, rs2, vl);
 }

-vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m4(base, bindex, vl);
+vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m4(rs1, rs2, vl);
 }

-vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m8(base, bindex, vl);
+vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m8(rs1, rs2, vl);
 }

-vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16mf4_m(mask, base, bindex, vl);
+vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16mf4_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16mf2_m(mask, base, bindex, vl);
+vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16mf2_m(vm, rs1, rs2, vl);
 }

-vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16m1_m(mask, base, bindex, vl);
+vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16m1_m(vm, rs1, rs2, vl);
 }

-vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16m2_m(mask, base, bindex, vl);
+vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16m2_m(vm, rs1, rs2, vl);
 }

-vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f16m4_m(mask, base, bindex, vl);
+vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f16m4_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32mf2_m(mask, base, bindex, vl);
+vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f32mf2_m(vm, rs1, rs2, vl);
 }

-vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32m1_m(mask, base, bindex, vl);
+vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_f32m1_m(vm, rs1, rs2, vl);
 }

-vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_f32m2_m(mask, base, bindex, vl);
+vfloat32m2_t
test_vloxei32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m2_m(vm, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m4_m(mask, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m4_m(vm, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m8_m(mask, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m8_m(vm, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m1_m(mask, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m1_m(vm, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m2_m(mask, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m2_m(vm, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m4_m(mask, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m4_m(vm, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m8_m(mask, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m8_m(vm, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf8_m(mask, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf8_m(vm, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf4_m(mask, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf4_m(vm, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf2_m(mask, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf2_m(vm, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m1_m(mask, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m1_m(vm, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return 
__riscv_vloxei32_v_i8m2_m(mask, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m2_m(vm, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf4_m(mask, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf4_m(vm, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf2_m(mask, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf2_m(vm, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m1_m(mask, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m1_m(vm, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m2_m(mask, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m2_m(vm, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m4_m(mask, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m4_m(vm, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32mf2_m(mask, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32mf2_m(vm, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m1_m(mask, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m1_m(vm, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m2_m(mask, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m2_m(vm, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m4_m(mask, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m4_m(vm, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m8_m(mask, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m8_m(vm, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, 
size_t vl) { - return __riscv_vloxei32_v_i64m1_m(mask, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m1_m(vm, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m2_m(mask, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m2_m(vm, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m4_m(mask, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m4_m(vm, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m8_m(mask, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m8_m(vm, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf8_m(mask, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf8_m(vm, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf4_m(mask, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf4_m(vm, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf2_m(mask, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf2_m(vm, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m1_m(mask, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m1_m(vm, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m2_m(mask, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m2_m(vm, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf4_m(mask, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf4_m(vm, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf2_m(mask, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf2_m(vm, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const 
uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m1_m(mask, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m1_m(vm, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m2_m(mask, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m2_m(vm, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m4_m(mask, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m4_m(vm, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32mf2_m(mask, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32mf2_m(vm, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m1_m(mask, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m1_m(vm, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m2_m(mask, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m2_m(vm, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m4_m(mask, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m4_m(vm, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m8_m(mask, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m8_m(vm, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m1_m(mask, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m1_m(vm, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m2_m(mask, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m2_m(vm, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m4_m(mask, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m4_m(vm, rs1, rs2, vl); } 
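/* Illustrative sketch (not part of the auto-generated tests): the renamed
 * parameters follow the instruction operand order -- vm is the mask, rs1 the
 * scalar base address, and rs2 the index operand, a vector of byte offsets.
 * A minimal hypothetical caller, assuming <riscv_vector.h> and a toolchain
 * that uses this naming scheme:
 *
 *   vfloat32m1_t gather_f32(vbool32_t vm, const float *rs1,
 *                           vuint32m1_t rs2, size_t n) {
 *     size_t vl = __riscv_vsetvl_e32m1(n);  // elements for this strip
 *     // Ordered indexed load, masked; same shape as the _m tests above.
 *     return __riscv_vloxei32_v_f32m1_m(vm, rs1, rs2, vl);
 *   }
 */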
-vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m8_m(mask, base, bindex, vl);
+vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m8_m(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/gnu-api-tests/vloxei64.c b/auto-generated/gnu-api-tests/vloxei64.c
index 8ab35965d..d22f642b8 100644
--- a/auto-generated/gnu-api-tests/vloxei64.c
+++ b/auto-generated/gnu-api-tests/vloxei64.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vloxei64_v_f16mf4(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16mf4(base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16mf4(rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei64_v_f16mf2(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16mf2(base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16mf2(rs1, rs2, vl);
 }
 
-vfloat16m1_t test_vloxei64_v_f16m1(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16m1(base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16m1(rs1, rs2, vl);
 }
 
-vfloat16m2_t test_vloxei64_v_f16m2(const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16m2(base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16m2(rs1, rs2, vl);
 }
 
-vfloat32mf2_t test_vloxei64_v_f32mf2(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32mf2(base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32mf2(rs1, rs2, vl);
 }
 
-vfloat32m1_t test_vloxei64_v_f32m1(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m1(base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m1(rs1, rs2, vl);
 }
 
-vfloat32m2_t test_vloxei64_v_f32m2(const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m2(base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m2(rs1, rs2, vl);
 }
 
-vfloat32m4_t test_vloxei64_v_f32m4(const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m4(base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4(const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m4(rs1, rs2, vl);
 }
 
-vfloat64m1_t test_vloxei64_v_f64m1(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m1(base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m1(rs1, rs2, vl);
 }
 
-vfloat64m2_t test_vloxei64_v_f64m2(const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m2(base, bindex, vl);
+vfloat64m2_t
test_vloxei64_v_f64m2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m2(rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4(const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m4(base, bindex, vl); +vfloat64m4_t test_vloxei64_v_f64m4(const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m4(rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8(const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m8(base, bindex, vl); +vfloat64m8_t test_vloxei64_v_f64m8(const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m8(rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf8(base, bindex, vl); +vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf8(rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf4(base, bindex, vl); +vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf4(rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf2(base, bindex, vl); +vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf2(rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8m1(base, bindex, vl); +vint8m1_t test_vloxei64_v_i8m1(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8m1(rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf4(base, bindex, vl); +vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf4(rs1, rs2, vl); } -vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf2(base, bindex, vl); +vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf2(rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m1(base, bindex, vl); +vint16m1_t test_vloxei64_v_i16m1(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m1(rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m2(base, bindex, vl); +vint16m2_t test_vloxei64_v_i16m2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m2(rs1, rs2, vl); } -vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32mf2(base, bindex, vl); +vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32mf2(rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m1(base, bindex, vl); +vint32m1_t test_vloxei64_v_i32m1(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m1(rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vloxei64_v_i32m2(base, bindex, vl); +vint32m2_t test_vloxei64_v_i32m2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m2(rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m4(base, bindex, vl); +vint32m4_t test_vloxei64_v_i32m4(const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m4(rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m1(base, bindex, vl); +vint64m1_t test_vloxei64_v_i64m1(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m1(rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m2(base, bindex, vl); +vint64m2_t test_vloxei64_v_i64m2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m2(rs1, rs2, vl); } -vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m4(base, bindex, vl); +vint64m4_t test_vloxei64_v_i64m4(const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m4(rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m8(base, bindex, vl); +vint64m8_t test_vloxei64_v_i64m8(const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m8(rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf8(base, bindex, vl); +vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf8(rs1, rs2, vl); } -vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf4(base, bindex, vl); +vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf4(rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf2(base, bindex, vl); +vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf2(rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8m1(base, bindex, vl); +vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8m1(rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf4(base, bindex, vl); +vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf4(rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf2(base, bindex, vl); +vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf2(rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m1(base, bindex, vl); +vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16m1(rs1, rs2, vl); } -vuint16m2_t 
test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m2(base, bindex, vl); +vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16m2(rs1, rs2, vl); } -vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32mf2(base, bindex, vl); +vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32mf2(rs1, rs2, vl); } -vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m1(base, bindex, vl); +vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m1(rs1, rs2, vl); } -vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m2(base, bindex, vl); +vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m2(rs1, rs2, vl); } -vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m4(base, bindex, vl); +vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m4(rs1, rs2, vl); } -vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m1(base, bindex, vl); +vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m1(rs1, rs2, vl); } -vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m2(base, bindex, vl); +vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m2(rs1, rs2, vl); } -vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m4(base, bindex, vl); +vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m4(rs1, rs2, vl); } -vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m8(base, bindex, vl); +vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m8(rs1, rs2, vl); } -vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16mf4_m(mask, base, bindex, vl); +vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16mf4_m(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16mf2_m(mask, base, bindex, vl); +vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16mf2_m(vm, rs1, rs2, vl); } -vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16m1_m(mask, base, bindex, vl); +vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16m1_m(vm, rs1, rs2, vl); } -vfloat16m2_t 
test_vloxei64_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16m2_m(mask, base, bindex, vl); +vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16m2_m(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32mf2_m(mask, base, bindex, vl); +vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32mf2_m(vm, rs1, rs2, vl); } -vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m1_m(mask, base, bindex, vl); +vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m1_m(vm, rs1, rs2, vl); } -vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m2_m(mask, base, bindex, vl); +vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m2_m(vm, rs1, rs2, vl); } -vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m4_m(mask, base, bindex, vl); +vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m4_m(vm, rs1, rs2, vl); } -vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m1_m(mask, base, bindex, vl); +vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m1_m(vm, rs1, rs2, vl); } -vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m2_m(mask, base, bindex, vl); +vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m2_m(vm, rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m4_m(mask, base, bindex, vl); +vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m4_m(vm, rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m8_m(mask, base, bindex, vl); +vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m8_m(vm, rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf8_m(mask, base, bindex, vl); +vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf8_m(vm, rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf4_m(mask, base, bindex, vl); +vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t 
vl) { + return __riscv_vloxei64_v_i8mf4_m(vm, rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf2_m(mask, base, bindex, vl); +vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf2_m(vm, rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8m1_m(mask, base, bindex, vl); +vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8m1_m(vm, rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf4_m(mask, base, bindex, vl); +vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf4_m(vm, rs1, rs2, vl); } -vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf2_m(mask, base, bindex, vl); +vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf2_m(vm, rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m1_m(mask, base, bindex, vl); +vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m1_m(vm, rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m2_m(mask, base, bindex, vl); +vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m2_m(vm, rs1, rs2, vl); } -vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32mf2_m(mask, base, bindex, vl); +vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32mf2_m(vm, rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m1_m(mask, base, bindex, vl); +vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m1_m(vm, rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m2_m(mask, base, bindex, vl); +vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m2_m(vm, rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m4_m(mask, base, bindex, vl); +vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m4_m(vm, rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m1_m(mask, base, bindex, vl); +vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t 
rs2, size_t vl) { + return __riscv_vloxei64_v_i64m1_m(vm, rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m2_m(mask, base, bindex, vl); +vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m2_m(vm, rs1, rs2, vl); } -vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m4_m(mask, base, bindex, vl); +vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m4_m(vm, rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m8_m(mask, base, bindex, vl); +vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m8_m(vm, rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf8_m(mask, base, bindex, vl); +vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf8_m(vm, rs1, rs2, vl); } -vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf4_m(mask, base, bindex, vl); +vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf4_m(vm, rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf2_m(mask, base, bindex, vl); +vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf2_m(vm, rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8m1_m(mask, base, bindex, vl); +vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8m1_m(vm, rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf4_m(mask, base, bindex, vl); +vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf4_m(vm, rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf2_m(mask, base, bindex, vl); +vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf2_m(vm, rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m1_m(mask, base, bindex, vl); +vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16m1_m(vm, rs1, rs2, vl); } -vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m2_m(mask, base, bindex, vl); +vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t vm, 
const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16m2_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32mf2_m(mask, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32mf2_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m1_m(mask, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m1_m(vm, rs1, rs2, vl);
 }
 
-vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m2_m(mask, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m2_m(vm, rs1, rs2, vl);
 }
 
-vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m4_m(mask, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m4_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m1_m(mask, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m1_m(vm, rs1, rs2, vl);
 }
 
-vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m2_m(mask, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m2_m(vm, rs1, rs2, vl);
 }
 
-vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m4_m(mask, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m4_m(vm, rs1, rs2, vl);
 }
 
-vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m8_m(mask, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m8_m(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei64\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vloxei8.c b/auto-generated/gnu-api-tests/vloxei8.c
index 71b61aff0..6ee203820 100644
--- a/auto-generated/gnu-api-tests/vloxei8.c
+++ b/auto-generated/gnu-api-tests/vloxei8.c
@@ -6,476 +6,476 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vloxei8_v_f16mf4(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16mf4(base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16mf4(rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei8_v_f16mf2(const float16_t *base,
vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16mf2(base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16mf2(rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m1(base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m1(rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m2(base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m2(rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4(const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m4(base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4(const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m4(rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8(const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m8(base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8(const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m8(rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32mf2(base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32mf2(rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m1(base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m1(rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2(const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m2(base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m2(rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4(const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m4(base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4(const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m4(rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8(const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m8(base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8(const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m8(rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m1(base, bindex, vl); +vfloat64m1_t test_vloxei8_v_f64m1(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m1(rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m2(base, bindex, vl); +vfloat64m2_t test_vloxei8_v_f64m2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m2(rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4(const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m4(base, bindex, vl); +vfloat64m4_t test_vloxei8_v_f64m4(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vloxei8_v_f64m4(rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8(const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m8(base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8(const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m8(rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf8(base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf8(rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf4(base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf4(rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf2(base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf2(rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m1(base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m1(rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m2(base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m2(rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m4(base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4(const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m4(rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m8(base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8(const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m8(rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf4(base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf4(rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf2(base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf2(rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m1(base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m1(rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m2(base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m2(rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m4(base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4(const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m4(rs1, rs2, vl); } 
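/* Illustrative sketch (not part of the auto-generated tests): with vloxei8
 * the index vector has 8-bit elements, so its register group scales down
 * relative to the data type -- e.g. vuint8m2_t byte offsets address
 * vint16m4_t data, as in test_vloxei8_v_i16m4 above. A hypothetical
 * unmasked caller, assuming <riscv_vector.h>:
 *
 *   vint16m4_t gather_i16(const int16_t *rs1, vuint8m2_t rs2, size_t n) {
 *     size_t vl = __riscv_vsetvl_e16m4(n);  // elements for this strip
 *     return __riscv_vloxei8_v_i16m4(rs1, rs2, vl);
 *   }
 */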
-vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m8(base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8(const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m8(rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32mf2(base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32mf2(rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m1(base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m1(rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m2(base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m2(rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m4(base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4(const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m4(rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m8(base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8(const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m8(rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m1(base, bindex, vl); +vint64m1_t test_vloxei8_v_i64m1(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m1(rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m2(base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m2(rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m4(base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4(const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m4(rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m8(base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8(const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m8(rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf8(base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf8(rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf4(base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf4(rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf2(base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf2(rs1, rs2, vl); } -vuint8m1_t 
test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m1(base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m1(rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m2(base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m2(rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m4(base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m4(rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m8(base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m8(rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf4(base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf4(rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf2(base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf2(rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m1(base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m1(rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m2(base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m2(rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m4(base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m4(rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m8(base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m8(rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32mf2(base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32mf2(rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m1(base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m1(rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m2(base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m2(rs1, rs2, vl); } 
-vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m4(base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m4(rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m8(base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m8(rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m1(base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m1(rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m2(base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m2(rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m4(base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m4(rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m8(base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m8(rs1, rs2, vl); } -vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16mf4_m(mask, base, bindex, vl); +vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16mf4_m(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16mf2_m(mask, base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16mf2_m(vm, rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m1_m(mask, base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m1_m(vm, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m2_m(mask, base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m2_m(vm, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m4_m(mask, base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m4_m(vm, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m8_m(mask, base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint8m4_t rs2, 
size_t vl) { + return __riscv_vloxei8_v_f16m8_m(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32mf2_m(mask, base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32mf2_m(vm, rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m1_m(mask, base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m1_m(vm, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m2_m(mask, base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m2_m(vm, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m4_m(mask, base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m4_m(vm, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m8_m(mask, base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m8_m(vm, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m1_m(mask, base, bindex, vl); +vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m1_m(vm, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m2_m(mask, base, bindex, vl); +vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m2_m(vm, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m4_m(mask, base, bindex, vl); +vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m4_m(vm, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m8_m(mask, base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m8_m(vm, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf8_m(mask, base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf8_m(vm, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf4_m(mask, base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t vm, const 
int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf4_m(vm, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf2_m(mask, base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf2_m(vm, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m1_m(mask, base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m1_m(vm, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m2_m(mask, base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m2_m(vm, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m4_m(mask, base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m4_m(vm, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m8_m(mask, base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m8_m(vm, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf4_m(mask, base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf4_m(vm, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf2_m(mask, base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf2_m(vm, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m1_m(mask, base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m1_m(vm, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m2_m(mask, base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m2_m(vm, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m4_m(mask, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m4_m(vm, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m8_m(mask, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m8_m(vm, 
rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32mf2_m(mask, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32mf2_m(vm, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m1_m(mask, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m1_m(vm, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m2_m(mask, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m2_m(vm, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m4_m(mask, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m4_m(vm, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m8_m(mask, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m8_m(vm, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m1_m(mask, base, bindex, vl); +vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m1_m(vm, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m2_m(mask, base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m2_m(vm, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m4_m(mask, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m4_m(vm, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m8_m(mask, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m8_m(vm, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf8_m(mask, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf8_m(vm, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf4_m(mask, base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf4_m(vm, rs1, rs2, vl); } -vuint8mf2_t 
test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf2_m(mask, base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf2_m(vm, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m1_m(mask, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m1_m(vm, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m2_m(mask, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m2_m(vm, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m4_m(mask, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m4_m(vm, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m8_m(mask, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m8_m(vm, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf4_m(mask, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf4_m(vm, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf2_m(mask, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf2_m(vm, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m1_m(mask, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m1_m(vm, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m2_m(mask, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m2_m(vm, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m4_m(mask, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m4_m(vm, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m8_m(mask, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m8_m(vm, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, 
const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32mf2_m(mask, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32mf2_m(vm, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m1_m(mask, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m1_m(vm, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m2_m(mask, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m2_m(vm, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m4_m(mask, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m4_m(vm, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m8_m(mask, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m8_m(vm, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m1_m(mask, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m1_m(vm, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m2_m(mask, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m2_m(vm, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m4_m(mask, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m4_m(vm, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m8_m(mask, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei8\.[ivxfswum.]+\s+} 118 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg2ei16.c b/auto-generated/gnu-api-tests/vloxseg2ei16.c index 19474aee2..fbe29fa42 100644 --- a/auto-generated/gnu-api-tests/vloxseg2ei16.c +++ b/auto-generated/gnu-api-tests/vloxseg2ei16.c @@ -6,388 +6,388 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16mf4x2(base, bindex, vl); +vfloat16mf4x2_t 
test_vloxseg2ei16_v_f16mf4x2(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16mf4x2(rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16mf2x2(base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16mf2x2(rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16m1x2(base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16m1x2(rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16m2x2(base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16m2x2(rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16m4x2(base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16m4x2(rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32mf2x2(base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32mf2x2(rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32m1x2(base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32m1x2(rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32m2x2(base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32m2x2(rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32m4x2(base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32m4x2(rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f64m1x2(base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f64m1x2(rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f64m2x2(base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f64m2x2(rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f64m4x2(base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei16_v_f64m4x2(rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8mf8x2(base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8mf8x2(rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8mf4x2(base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8mf4x2(rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8mf2x2(base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8mf2x2(rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8m1x2(base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8m1x2(rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8m2x2(base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8m2x2(rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8m4x2(base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8m4x2(rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16mf4x2(base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16mf4x2(rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16mf2x2(base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16mf2x2(rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16m1x2(base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16m1x2(rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16m2x2(base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16m2x2(rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16m4x2(base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16m4x2(rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32mf2x2(base, bindex, vl); 
+vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32mf2x2(rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32m1x2(base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32m1x2(rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32m2x2(base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32m2x2(rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32m4x2(base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32m4x2(rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i64m1x2(base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i64m1x2(rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i64m2x2(base, bindex, vl); +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i64m2x2(rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i64m4x2(base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i64m4x2(rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf8x2(base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf8x2(rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf4x2(base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf4x2(rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf2x2(base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf2x2(rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m1x2(base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m1x2(rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m2x2(base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m2x2(rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const 
uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m4x2(base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m4x2(rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16mf4x2(base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16mf4x2(rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16mf2x2(base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16mf2x2(rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16m1x2(base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16m1x2(rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16m2x2(base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16m2x2(rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16m4x2(base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16m4x2(rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u32mf2x2(base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u32mf2x2(rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u32m1x2(base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u32m1x2(rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u32m2x2(base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u32m2x2(rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u32m4x2(base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u32m4x2(rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u64m1x2(base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u64m1x2(rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u64m2x2(base, bindex, vl); +vuint64m2x2_t 
test_vloxseg2ei16_v_u64m2x2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u64m2x2(rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u64m4x2(base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u64m4x2(rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16mf4x2_m(mask, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16mf4x2_m(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16mf2x2_m(mask, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16mf2x2_m(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16m1x2_m(mask, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16m1x2_m(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16m2x2_m(mask, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16m2x2_m(vm, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f16m4x2_m(mask, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f16m4x2_m(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32mf2x2_m(mask, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32mf2x2_m(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32m1x2_m(mask, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32m1x2_m(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32m2x2_m(mask, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32m2x2_m(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f32m4x2_m(mask, base, bindex, vl); +vfloat32m4x2_t 
test_vloxseg2ei16_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f32m4x2_m(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f64m1x2_m(mask, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f64m1x2_m(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f64m2x2_m(mask, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f64m2x2_m(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_f64m4x2_m(mask, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_f64m4x2_m(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8mf8x2_m(mask, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8mf8x2_m(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8mf4x2_m(mask, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8mf4x2_m(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8mf2x2_m(mask, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8mf2x2_m(vm, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8m1x2_m(mask, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8m1x2_m(vm, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8m2x2_m(mask, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8m2x2_m(vm, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i8m4x2_m(mask, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i8m4x2_m(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16mf4x2_m(mask, base, bindex, vl); +vint16mf4x2_t 
test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16mf4x2_m(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16mf2x2_m(mask, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16mf2x2_m(vm, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16m1x2_m(mask, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16m1x2_m(vm, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16m2x2_m(mask, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16m2x2_m(vm, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i16m4x2_m(mask, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i16m4x2_m(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32mf2x2_m(mask, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32mf2x2_m(vm, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32m1x2_m(mask, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32m1x2_m(vm, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32m2x2_m(mask, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32m2x2_m(vm, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i32m4x2_m(mask, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i32m4x2_m(vm, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i64m1x2_m(mask, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i64m1x2_m(vm, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i64m2x2_m(mask, base, bindex, vl); +vint64m2x2_t 
test_vloxseg2ei16_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i64m2x2_m(vm, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_i64m4x2_m(mask, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i64m4x2_m(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf8x2_m(mask, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf8x2_m(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf4x2_m(mask, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf4x2_m(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf2x2_m(mask, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf2x2_m(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m1x2_m(mask, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m1x2_m(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m2x2_m(mask, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m2x2_m(vm, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m4x2_m(mask, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m4x2_m(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16mf4x2_m(mask, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16mf4x2_m(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16mf2x2_m(mask, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16mf2x2_m(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16m1x2_m(mask, base, bindex, vl); +vuint16m1x2_t 
test_vloxseg2ei16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m1x2_m(vm, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m2x2_m(mask, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m2x2_m(vm, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m4x2_m(mask, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m4x2_m(vm, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32mf2x2_m(mask, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32mf2x2_m(vm, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m1x2_m(mask, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m1x2_m(vm, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m2x2_m(mask, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m2x2_m(vm, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m4x2_m(mask, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m4x2_m(vm, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m1x2_m(mask, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m1x2_m(vm, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m2x2_m(mask, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m2x2_m(vm, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m4x2_m(mask, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m4x2_m(vm, rs1, rs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei16\.[ivxfswum.]+\s+} 96 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg2ei32.c b/auto-generated/gnu-api-tests/vloxseg2ei32.c
index e50d59cdd..f357e2497 100644
--- a/auto-generated/gnu-api-tests/vloxseg2ei32.c
+++ b/auto-generated/gnu-api-tests/vloxseg2ei32.c
@@ -6,372 +6,372 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf4x2(base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf4x2(rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf2x2(base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf2x2(rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m1x2(base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m1x2(rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m2x2(base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m2x2(rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m4x2(base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m4x2(rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32mf2x2(base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32mf2x2(rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m1x2(base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m1x2(rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m2x2(base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m2x2(rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m4x2(base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m4x2(rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m1x2(base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m1x2(rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m2x2(base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m2x2(rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m4x2(base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m4x2(rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf8x2(base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf8x2(rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf4x2(base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf4x2(rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf2x2(base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf2x2(rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m1x2(base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m1x2(rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m2x2(base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m2x2(rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf4x2(base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf4x2(rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf2x2(base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf2x2(rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m1x2(base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m1x2(rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m2x2(base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m2x2(rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m4x2(base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m4x2(rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32mf2x2(base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32mf2x2(rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m1x2(base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m1x2(rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m2x2(base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m2x2(rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m4x2(base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m4x2(rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m1x2(base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m1x2(rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m2x2(base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m2x2(rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m4x2(base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m4x2(rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf8x2(base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf8x2(rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf4x2(base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf4x2(rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf2x2(base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf2x2(rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m1x2(base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m1x2(rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m2x2(base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m2x2(rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf4x2(base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf4x2(rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf2x2(base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf2x2(rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m1x2(base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m1x2(rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m2x2(base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m2x2(rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m4x2(base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m4x2(rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32mf2x2(base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32mf2x2(rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m1x2(base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m1x2(rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m2x2(base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m2x2(rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m4x2(base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m4x2(rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m1x2(base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m1x2(rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m2x2(base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m2x2(rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m4x2(base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m4x2(rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf4x2_m(mask, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf4x2_m(vm, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf2x2_m(mask, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf2x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m1x2_m(mask, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m2x2_m(mask, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m4x2_m(mask, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m4x2_m(vm, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32mf2x2_m(mask, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32mf2x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m1x2_m(mask, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m2x2_m(mask, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m4x2_m(mask, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m4x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m1x2_m(mask, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m2x2_m(mask, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m4x2_m(mask, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m4x2_m(vm, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf8x2_m(mask, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf8x2_m(vm, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf4x2_m(mask, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf4x2_m(vm, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf2x2_m(mask, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf2x2_m(vm, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m1x2_m(mask, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m1x2_m(vm, rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m2x2_m(mask, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m2x2_m(vm, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf4x2_m(mask, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf4x2_m(vm, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf2x2_m(mask, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf2x2_m(vm, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m1x2_m(mask, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m1x2_m(vm, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m2x2_m(mask, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m2x2_m(vm, rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m4x2_m(mask, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m4x2_m(vm, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32mf2x2_m(mask, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32mf2x2_m(vm, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m1x2_m(mask, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m1x2_m(vm, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m2x2_m(mask, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m2x2_m(vm, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m4x2_m(mask, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m4x2_m(vm, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m1x2_m(mask, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m1x2_m(vm, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m2x2_m(mask, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m2x2_m(vm, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m4x2_m(mask, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m4x2_m(vm, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf8x2_m(mask, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf8x2_m(vm, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf4x2_m(mask, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf4x2_m(vm, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf2x2_m(mask, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf2x2_m(vm, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m1x2_m(mask, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m1x2_m(vm, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m2x2_m(mask, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m2x2_m(vm, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf4x2_m(mask, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf4x2_m(vm, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf2x2_m(mask, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf2x2_m(vm, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m1x2_m(mask, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m1x2_m(vm, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m2x2_m(mask, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m2x2_m(vm, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m4x2_m(mask, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m4x2_m(vm, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32mf2x2_m(mask, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32mf2x2_m(vm, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m1x2_m(mask, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m1x2_m(vm, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m2x2_m(mask, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m2x2_m(vm, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m4x2_m(mask, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m4x2_m(vm, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m1x2_m(mask, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m1x2_m(vm, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m2x2_m(mask, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m2x2_m(vm, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m4x2_m(mask, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m4x2_m(vm, rs1, rs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei32\.[ivxfswum.]+\s+} 92 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg2ei64.c b/auto-generated/gnu-api-tests/vloxseg2ei64.c
index 15c677538..17870a782 100644
--- a/auto-generated/gnu-api-tests/vloxseg2ei64.c
+++ b/auto-generated/gnu-api-tests/vloxseg2ei64.c
@@ -6,332 +6,332 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16mf4x2(base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16mf4x2(rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16mf2x2(base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16mf2x2(rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16m1x2(base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16m1x2(rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16m2x2(base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16m2x2(rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32mf2x2(base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32mf2x2(rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32m1x2(base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32m1x2(rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32m2x2(base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32m2x2(rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32m4x2(base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32m4x2(rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f64m1x2(base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f64m1x2(rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f64m2x2(base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f64m2x2(rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f64m4x2(base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f64m4x2(rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8mf8x2(base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8mf8x2(rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8mf4x2(base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8mf4x2(rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8mf2x2(base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8mf2x2(rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8m1x2(base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8m1x2(rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16mf4x2(base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16mf4x2(rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16mf2x2(base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16mf2x2(rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16m1x2(base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16m1x2(rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16m2x2(base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16m2x2(rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32mf2x2(base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32mf2x2(rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32m1x2(base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32m1x2(rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32m2x2(base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32m2x2(rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32m4x2(base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32m4x2(rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i64m1x2(base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i64m1x2(rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i64m2x2(base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i64m2x2(rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i64m4x2(base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i64m4x2(rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8mf8x2(base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8mf8x2(rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8mf4x2(base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8mf4x2(rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8mf2x2(base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8mf2x2(rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8m1x2(base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8m1x2(rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16mf4x2(base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16mf4x2(rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16mf2x2(base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16mf2x2(rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16m1x2(base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16m1x2(rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16m2x2(base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16m2x2(rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32mf2x2(base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32mf2x2(rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32m1x2(base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32m1x2(rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32m2x2(base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32m2x2(rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32m4x2(base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32m4x2(rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u64m1x2(base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u64m1x2(rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u64m2x2(base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u64m2x2(rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u64m4x2(base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u64m4x2(rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16mf4x2_m(mask, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16mf4x2_m(vm, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16mf2x2_m(mask, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16mf2x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16m1x2_m(mask, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f16m2x2_m(mask, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f16m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32mf2x2_m(mask, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32mf2x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32m1x2_m(mask, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32m2x2_m(mask, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f32m4x2_m(mask, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f32m4x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f64m1x2_m(mask, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f64m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f64m2x2_m(mask, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f64m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_f64m4x2_m(mask, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_f64m4x2_m(vm, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8mf8x2_m(mask, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8mf8x2_m(vm, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8mf4x2_m(mask, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8mf4x2_m(vm, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8mf2x2_m(mask, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8mf2x2_m(vm, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i8m1x2_m(mask, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i8m1x2_m(vm, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16mf4x2_m(mask, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16mf4x2_m(vm, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16mf2x2_m(mask, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16mf2x2_m(vm, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16m1x2_m(mask, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16m1x2_m(vm, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i16m2x2_m(mask, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i16m2x2_m(vm, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32mf2x2_m(mask, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32mf2x2_m(vm, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32m1x2_m(mask, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32m1x2_m(vm, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32m2x2_m(mask, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32m2x2_m(vm, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i32m4x2_m(mask, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i32m4x2_m(vm, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i64m1x2_m(mask, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i64m1x2_m(vm, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i64m2x2_m(mask, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i64m2x2_m(vm, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_i64m4x2_m(mask, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_i64m4x2_m(vm, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8mf8x2_m(mask, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8mf8x2_m(vm, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8mf4x2_m(mask, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8mf4x2_m(vm, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8mf2x2_m(mask, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8mf2x2_m(vm, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u8m1x2_m(mask, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u8m1x2_m(vm, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16mf4x2_m(mask, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16mf4x2_m(vm, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16mf2x2_m(mask, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16mf2x2_m(vm, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16m1x2_m(mask, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16m1x2_m(vm, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u16m2x2_m(mask, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u16m2x2_m(vm, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32mf2x2_m(mask, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32mf2x2_m(vm, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32m1x2_m(mask, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32m1x2_m(vm, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32m2x2_m(mask, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32m2x2_m(vm, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u32m4x2_m(mask, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u32m4x2_m(vm, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u64m1x2_m(mask, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u64m1x2_m(vm, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u64m2x2_m(mask, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u64m2x2_m(vm, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_v_u64m4x2_m(mask, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_v_u64m4x2_m(vm, rs1, rs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei64\.[ivxfswum.]+\s+} 82 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg2ei8.c b/auto-generated/gnu-api-tests/vloxseg2ei8.c
index ac5885a15..4220c2bd2 100644
--- a/auto-generated/gnu-api-tests/vloxseg2ei8.c
+++ b/auto-generated/gnu-api-tests/vloxseg2ei8.c
@@ -6,388 +6,388 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f16mf4x2(base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f16mf4x2(rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f16mf2x2(base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f16mf2x2(rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f16m1x2(base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f16m1x2(rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f16m2x2(base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f16m2x2(rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f16m4x2(base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f16m4x2(rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f32mf2x2(base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f32mf2x2(rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f32m1x2(base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f32m1x2(rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f32m2x2(base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f32m2x2(rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f32m4x2(base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f32m4x2(rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f64m1x2(base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f64m1x2(rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f64m2x2(base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f64m2x2(rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_f64m4x2(base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_f64m4x2(rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i8mf8x2(base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i8mf8x2(rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i8mf4x2(base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i8mf4x2(rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i8mf2x2(base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i8mf2x2(rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i8m1x2(base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i8m1x2(rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i8m2x2(base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i8m2x2(rs1, rs2, vl);
 }
-vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i8m4x2(base, bindex, vl);
+vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i8m4x2(rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i16mf4x2(base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i16mf4x2(rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i16mf2x2(base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i16mf2x2(rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i16m1x2(base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i16m1x2(rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i16m2x2(base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i16m2x2(rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i16m4x2(base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i16m4x2(rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i32mf2x2(base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i32mf2x2(rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i32m1x2(base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i32m1x2(rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i32m2x2(base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i32m2x2(rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i32m4x2(base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i32m4x2(rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i64m1x2(base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i64m1x2(rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i64m2x2(base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i64m2x2(rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_i64m4x2(base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_i64m4x2(rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8mf8x2(base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8mf8x2(rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8mf4x2(base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8mf4x2(rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8mf2x2(base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8mf2x2(rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8m1x2(base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8m1x2(rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8m2x2(base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8m2x2(rs1, rs2, vl);
 }
-vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8m4x2(base, bindex, vl);
+vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8m4x2(rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16mf4x2(base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u16mf4x2(rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16mf2x2(base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u16mf2x2(rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16m1x2(base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return
__riscv_vloxseg2ei8_v_u16m1x2(rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m2x2(base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m2x2(rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m4x2(base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m4x2(rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32mf2x2(base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32mf2x2(rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m1x2(base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m1x2(rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m2x2(base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m2x2(rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m4x2(base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m4x2(rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m1x2(base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m1x2(rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m2x2(base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m2x2(rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m4x2(base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m4x2(rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf4x2_m(mask, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf4x2_m(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf2x2_m(mask, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf2x2_m(vm, rs1, rs2, vl); } -vfloat16m1x2_t 
test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m1x2_m(mask, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m1x2_m(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m2x2_m(mask, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m2x2_m(vm, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m4x2_m(mask, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m4x2_m(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32mf2x2_m(mask, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32mf2x2_m(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m1x2_m(mask, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m1x2_m(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m2x2_m(mask, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m2x2_m(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m4x2_m(mask, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m4x2_m(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m1x2_m(mask, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m1x2_m(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m2x2_m(mask, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m2x2_m(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m4x2_m(mask, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m4x2_m(vm, rs1, rs2, vl); } -vint8mf8x2_t 
test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf8x2_m(mask, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf8x2_m(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf4x2_m(mask, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf4x2_m(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf2x2_m(mask, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf2x2_m(vm, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m1x2_m(mask, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m1x2_m(vm, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m2x2_m(mask, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m2x2_m(vm, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m4x2_m(mask, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m4x2_m(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf4x2_m(mask, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf4x2_m(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf2x2_m(mask, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf2x2_m(vm, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m1x2_m(mask, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m1x2_m(vm, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m2x2_m(mask, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m2x2_m(vm, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8_v_i16m4x2_m(mask, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m4x2_m(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32mf2x2_m(mask, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32mf2x2_m(vm, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m1x2_m(mask, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m1x2_m(vm, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m2x2_m(mask, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m2x2_m(vm, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m4x2_m(mask, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m4x2_m(vm, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m1x2_m(mask, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m1x2_m(vm, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m2x2_m(mask, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m2x2_m(vm, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m4x2_m(mask, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m4x2_m(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf8x2_m(mask, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf8x2_m(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf4x2_m(mask, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf4x2_m(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf2x2_m(mask, base, bindex, vl); 
+vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf2x2_m(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m1x2_m(mask, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m1x2_m(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m2x2_m(mask, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m2x2_m(vm, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m4x2_m(mask, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m4x2_m(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf4x2_m(mask, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf4x2_m(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf2x2_m(mask, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf2x2_m(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m1x2_m(mask, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m1x2_m(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m2x2_m(mask, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m2x2_m(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m4x2_m(mask, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m4x2_m(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32mf2x2_m(mask, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32mf2x2_m(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m1x2_m(mask, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t vm, 
const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m1x2_m(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m2x2_m(mask, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m2x2_m(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m4x2_m(mask, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m4x2_m(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m1x2_m(mask, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m1x2_m(vm, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m2x2_m(mask, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m2x2_m(vm, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m4x2_m(mask, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m4x2_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei8\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg3ei16.c b/auto-generated/gnu-api-tests/vloxseg3ei16.c index 0e5f8527f..040002880 100644 --- a/auto-generated/gnu-api-tests/vloxseg3ei16.c +++ b/auto-generated/gnu-api-tests/vloxseg3ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16mf4x3(base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16mf4x3(rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16mf2x3(base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16mf2x3(rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16m1x3(base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16m1x3(rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16m2x3(base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const float16_t *rs1, vuint16m2_t 
rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16m2x3(rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f32mf2x3(base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f32mf2x3(rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f32m1x3(base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f32m1x3(rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f32m2x3(base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f32m2x3(rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f64m1x3(base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f64m1x3(rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f64m2x3(base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f64m2x3(rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8mf8x3(base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8mf8x3(rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8mf4x3(base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8mf4x3(rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8mf2x3(base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8mf2x3(rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8m1x3(base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8m1x3(rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8m2x3(base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8m2x3(rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i16mf4x3(base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16mf4x3(rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, 
size_t vl) { - return __riscv_vloxseg3ei16_v_i16mf2x3(base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16mf2x3(rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i16m1x3(base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16m1x3(rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i16m2x3(base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16m2x3(rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i32mf2x3(base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i32mf2x3(rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i32m1x3(base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i32m1x3(rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i32m2x3(base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i32m2x3(rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i64m1x3(base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i64m1x3(rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i64m2x3(base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i64m2x3(rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8mf8x3(base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8mf8x3(rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8mf4x3(base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8mf4x3(rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8mf2x3(base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8mf2x3(rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8m1x3(base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei16_v_u8m1x3(rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8m2x3(base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8m2x3(rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16mf4x3(base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u16mf4x3(rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16mf2x3(base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u16mf2x3(rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16m1x3(base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u16m1x3(rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16m2x3(base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u16m2x3(rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u32mf2x3(base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u32mf2x3(rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u32m1x3(base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u32m1x3(rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u32m2x3(base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u32m2x3(rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u64m1x3(base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u64m1x3(rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u64m2x3(base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u64m2x3(rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16mf4x3_m(mask, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16mf4x3_m(vm, rs1, rs2, vl); } -vfloat16mf2x3_t 
test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16mf2x3_m(mask, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16mf2x3_m(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16m1x3_m(mask, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16m1x3_m(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f16m2x3_m(mask, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f16m2x3_m(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f32mf2x3_m(mask, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f32mf2x3_m(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f32m1x3_m(mask, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f32m1x3_m(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f32m2x3_m(mask, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f32m2x3_m(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f64m1x3_m(mask, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f64m1x3_m(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_f64m2x3_m(mask, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_f64m2x3_m(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8mf8x3_m(mask, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8mf8x3_m(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8mf4x3_m(mask, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei16_v_i8mf4x3_m(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8mf2x3_m(mask, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8mf2x3_m(vm, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8m1x3_m(mask, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8m1x3_m(vm, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i8m2x3_m(mask, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i8m2x3_m(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i16mf4x3_m(mask, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16mf4x3_m(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i16mf2x3_m(mask, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16mf2x3_m(vm, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i16m1x3_m(mask, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16m1x3_m(vm, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i16m2x3_m(mask, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i16m2x3_m(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i32mf2x3_m(mask, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i32mf2x3_m(vm, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i32m1x3_m(mask, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i32m1x3_m(vm, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i32m2x3_m(mask, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei16_v_i32m2x3_m(vm, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i64m1x3_m(mask, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i64m1x3_m(vm, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_i64m2x3_m(mask, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_i64m2x3_m(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8mf8x3_m(mask, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8mf8x3_m(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8mf4x3_m(mask, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8mf4x3_m(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8mf2x3_m(mask, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8mf2x3_m(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8m1x3_m(mask, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8m1x3_m(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u8m2x3_m(mask, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u8m2x3_m(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16mf4x3_m(mask, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u16mf4x3_m(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16mf2x3_m(mask, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u16mf2x3_m(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16m1x3_m(mask, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei16_v_u16m1x3_m(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u16m2x3_m(mask, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u16m2x3_m(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u32mf2x3_m(mask, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u32mf2x3_m(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u32m1x3_m(mask, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u32m1x3_m(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u32m2x3_m(mask, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u32m2x3_m(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u64m1x3_m(mask, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u64m1x3_m(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_v_u64m2x3_m(mask, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_u64m2x3_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg3ei32.c b/auto-generated/gnu-api-tests/vloxseg3ei32.c index 0d6033388..01a8d084c 100644 --- a/auto-generated/gnu-api-tests/vloxseg3ei32.c +++ b/auto-generated/gnu-api-tests/vloxseg3ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf4x3(base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf4x3(rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf2x3(base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf2x3(rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m1x3(base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const float16_t 
*rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m1x3(rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m2x3(base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m2x3(rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32mf2x3(base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32mf2x3(rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m1x3(base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m1x3(rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m2x3(base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m2x3(rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m1x3(base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m1x3(rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m2x3(base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m2x3(rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf8x3(base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf8x3(rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf4x3(base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf4x3(rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf2x3(base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf2x3(rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m1x3(base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m1x3(rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m2x3(base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m2x3(rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t 
bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf4x3(base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf4x3(rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf2x3(base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf2x3(rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m1x3(base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m1x3(rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m2x3(base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m2x3(rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32mf2x3(base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32mf2x3(rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m1x3(base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m1x3(rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m2x3(base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m2x3(rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m1x3(base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m1x3(rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m2x3(base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m2x3(rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf8x3(base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf8x3(rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf4x3(base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf4x3(rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf2x3(base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei32_v_u8mf2x3(rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m1x3(base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m1x3(rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m2x3(base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m2x3(rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf4x3(base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf4x3(rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf2x3(base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf2x3(rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m1x3(base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m1x3(rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m2x3(base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m2x3(rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32mf2x3(base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32mf2x3(rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m1x3(base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m1x3(rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m2x3(base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m2x3(rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m1x3(base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m1x3(rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m2x3(base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m2x3(rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, 
size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf4x3_m(mask, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf4x3_m(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf2x3_m(mask, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf2x3_m(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m1x3_m(mask, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m1x3_m(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m2x3_m(mask, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m2x3_m(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32mf2x3_m(mask, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32mf2x3_m(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m1x3_m(mask, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m1x3_m(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m2x3_m(mask, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m2x3_m(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m1x3_m(mask, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m1x3_m(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m2x3_m(mask, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m2x3_m(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf8x3_m(mask, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf8x3_m(vm, rs1, rs2, vl); } -vint8mf4x3_t 
test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf4x3_m(mask, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf4x3_m(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf2x3_m(mask, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf2x3_m(vm, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m1x3_m(mask, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m1x3_m(vm, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m2x3_m(mask, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m2x3_m(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf4x3_m(mask, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf4x3_m(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf2x3_m(mask, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf2x3_m(vm, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m1x3_m(mask, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m1x3_m(vm, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m2x3_m(mask, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m2x3_m(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32mf2x3_m(mask, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32mf2x3_m(vm, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m1x3_m(mask, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m1x3_m(vm, rs1, rs2, vl); } -vint32m2x3_t 
test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m2x3_m(mask, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m2x3_m(vm, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m1x3_m(mask, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m1x3_m(vm, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m2x3_m(mask, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m2x3_m(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf8x3_m(mask, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf8x3_m(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf4x3_m(mask, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf4x3_m(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf2x3_m(mask, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf2x3_m(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m1x3_m(mask, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m1x3_m(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m2x3_m(mask, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m2x3_m(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf4x3_m(mask, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf4x3_m(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf2x3_m(mask, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf2x3_m(vm, rs1, rs2, vl); } -vuint16m1x3_t 
test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m1x3_m(mask, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m1x3_m(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m2x3_m(mask, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m2x3_m(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32mf2x3_m(mask, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32mf2x3_m(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m1x3_m(mask, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m1x3_m(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m2x3_m(mask, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m2x3_m(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m1x3_m(mask, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m1x3_m(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m2x3_m(mask, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m2x3_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg3ei64.c b/auto-generated/gnu-api-tests/vloxseg3ei64.c index bc0752e8a..8d8544ed3 100644 --- a/auto-generated/gnu-api-tests/vloxseg3ei64.c +++ b/auto-generated/gnu-api-tests/vloxseg3ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16mf4x3(base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f16mf4x3(rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16mf2x3(base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const float16_t *rs1, vuint64m2_t rs2, size_t vl) 
{ + return __riscv_vloxseg3ei64_v_f16mf2x3(rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16m1x3(base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f16m1x3(rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16m2x3(base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f16m2x3(rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f32mf2x3(base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f32mf2x3(rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f32m1x3(base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f32m1x3(rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f32m2x3(base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f32m2x3(rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f64m1x3(base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f64m1x3(rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f64m2x3(base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f64m2x3(rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8mf8x3(base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i8mf8x3(rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8mf4x3(base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i8mf4x3(rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8mf2x3(base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i8mf2x3(rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8m1x3(base, bindex, vl); +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i8m1x3(rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { - 
return __riscv_vloxseg3ei64_v_i16mf4x3(base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16mf4x3(rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i16mf2x3(base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16mf2x3(rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i16m1x3(base, bindex, vl); +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16m1x3(rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i16m2x3(base, bindex, vl); +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16m2x3(rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i32mf2x3(base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i32mf2x3(rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i32m1x3(base, bindex, vl); +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i32m1x3(rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i32m2x3(base, bindex, vl); +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i32m2x3(rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i64m1x3(base, bindex, vl); +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i64m1x3(rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i64m2x3(base, bindex, vl); +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i64m2x3(rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8mf8x3(base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8mf8x3(rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8mf4x3(base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8mf4x3(rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8mf2x3(base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei64_v_u8mf2x3(rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8m1x3(base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8m1x3(rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16mf4x3(base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16mf4x3(rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16mf2x3(base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16mf2x3(rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16m1x3(base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16m1x3(rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16m2x3(base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16m2x3(rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32mf2x3(base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32mf2x3(rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32m1x3(base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32m1x3(rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32m2x3(base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32m2x3(rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u64m1x3(base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u64m1x3(rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u64m2x3(base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u64m2x3(rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16mf4x3_m(mask, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f16mf4x3_m(vm, rs1, rs2, vl); } -vfloat16mf2x3_t 
test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16mf2x3_m(mask, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f16mf2x3_m(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16m1x3_m(mask, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f16m1x3_m(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f16m2x3_m(mask, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f16m2x3_m(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f32mf2x3_m(mask, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f32mf2x3_m(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f32m1x3_m(mask, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f32m1x3_m(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f32m2x3_m(mask, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f32m2x3_m(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f64m1x3_m(mask, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f64m1x3_m(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_f64m2x3_m(mask, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_f64m2x3_m(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8mf8x3_m(mask, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i8mf8x3_m(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8mf4x3_m(mask, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei64_v_i8mf4x3_m(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8mf2x3_m(mask, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i8mf2x3_m(vm, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i8m1x3_m(mask, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i8m1x3_m(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i16mf4x3_m(mask, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16mf4x3_m(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i16mf2x3_m(mask, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16mf2x3_m(vm, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i16m1x3_m(mask, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16m1x3_m(vm, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i16m2x3_m(mask, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i16m2x3_m(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i32mf2x3_m(mask, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i32mf2x3_m(vm, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i32m1x3_m(mask, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i32m1x3_m(vm, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i32m2x3_m(mask, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i32m2x3_m(vm, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i64m1x3_m(mask, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei64_v_i64m1x3_m(vm, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_i64m2x3_m(mask, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_i64m2x3_m(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8mf8x3_m(mask, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8mf8x3_m(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8mf4x3_m(mask, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8mf4x3_m(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8mf2x3_m(mask, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8mf2x3_m(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8m1x3_m(mask, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8m1x3_m(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16mf4x3_m(mask, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16mf4x3_m(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16mf2x3_m(mask, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16mf2x3_m(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16m1x3_m(mask, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16m1x3_m(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16m2x3_m(mask, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16m2x3_m(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32mf2x3_m(mask, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei64_v_u32mf2x3_m(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32m1x3_m(mask, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32m1x3_m(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32m2x3_m(mask, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32m2x3_m(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u64m1x3_m(mask, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u64m1x3_m(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u64m2x3_m(mask, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u64m2x3_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg3ei8.c b/auto-generated/gnu-api-tests/vloxseg3ei8.c index ed4c17605..6c074c512 100644 --- a/auto-generated/gnu-api-tests/vloxseg3ei8.c +++ b/auto-generated/gnu-api-tests/vloxseg3ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf4x3(base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf4x3(rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf2x3(base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf2x3(rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m1x3(base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m1x3(rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m2x3(base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m2x3(rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32mf2x3(base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32mf2x3(rs1, rs2, vl); } -vfloat32m1x3_t 
test_vloxseg3ei8_v_f32m1x3(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m1x3(base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m1x3(rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m2x3(base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m2x3(rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m1x3(base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m1x3(rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m2x3(base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m2x3(rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf8x3(base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf8x3(rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf4x3(base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf4x3(rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf2x3(base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf2x3(rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m1x3(base, bindex, vl); +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m1x3(rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m2x3(base, bindex, vl); +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m2x3(rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf4x3(base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf4x3(rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf2x3(base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf2x3(rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m1x3(base, bindex, vl); +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei8_v_i16m1x3(rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m2x3(base, bindex, vl); +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m2x3(rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32mf2x3(base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32mf2x3(rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m1x3(base, bindex, vl); +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m1x3(rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m2x3(base, bindex, vl); +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m2x3(rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m1x3(base, bindex, vl); +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m1x3(rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m2x3(base, bindex, vl); +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m2x3(rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf8x3(base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf8x3(rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf4x3(base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf4x3(rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf2x3(base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf2x3(rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m1x3(base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m1x3(rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m2x3(base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m2x3(rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf4x3(base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const 
uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf4x3(rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf2x3(base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf2x3(rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m1x3(base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m1x3(rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m2x3(base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m2x3(rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32mf2x3(base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32mf2x3(rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32m1x3(base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32m1x3(rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32m2x3(base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32m2x3(rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u64m1x3(base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u64m1x3(rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u64m2x3(base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u64m2x3(rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf4x3_m(mask, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf4x3_m(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf2x3_m(mask, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf2x3_m(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m1x3_m(mask, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, 
size_t vl) { + return __riscv_vloxseg3ei8_v_f16m1x3_m(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m2x3_m(mask, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m2x3_m(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32mf2x3_m(mask, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32mf2x3_m(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m1x3_m(mask, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m1x3_m(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m2x3_m(mask, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m2x3_m(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m1x3_m(mask, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m1x3_m(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m2x3_m(mask, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m2x3_m(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf8x3_m(mask, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf8x3_m(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf4x3_m(mask, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf4x3_m(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf2x3_m(mask, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf2x3_m(vm, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m1x3_m(mask, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei8_v_i8m1x3_m(vm, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m2x3_m(mask, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m2x3_m(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf4x3_m(mask, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf4x3_m(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf2x3_m(mask, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf2x3_m(vm, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m1x3_m(mask, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m1x3_m(vm, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m2x3_m(mask, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m2x3_m(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32mf2x3_m(mask, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32mf2x3_m(vm, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m1x3_m(mask, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m1x3_m(vm, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m2x3_m(mask, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m2x3_m(vm, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m1x3_m(mask, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m1x3_m(vm, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m2x3_m(mask, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m2x3_m(vm, rs1, rs2, vl); } -vuint8mf8x3_t 
test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf8x3_m(mask, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf8x3_m(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf4x3_m(mask, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf4x3_m(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf2x3_m(mask, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf2x3_m(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m1x3_m(mask, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m1x3_m(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m2x3_m(mask, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m2x3_m(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf4x3_m(mask, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf4x3_m(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf2x3_m(mask, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf2x3_m(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m1x3_m(mask, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m1x3_m(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m2x3_m(mask, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m2x3_m(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32mf2x3_m(mask, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32mf2x3_m(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const 
uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u32m1x3_m(mask, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u32m1x3_m(vm, rs1, rs2, vl);
 }
 
-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u32m2x3_m(mask, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u32m2x3_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u64m1x3_m(mask, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u64m1x3_m(vm, rs1, rs2, vl);
 }
 
-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u64m2x3_m(mask, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u64m2x3_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei8\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg4ei16.c b/auto-generated/gnu-api-tests/vloxseg4ei16.c
index 725feb098..e88b520fb 100644
--- a/auto-generated/gnu-api-tests/vloxseg4ei16.c
+++ b/auto-generated/gnu-api-tests/vloxseg4ei16.c
@@ -6,300 +6,300 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16mf4x4(base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16mf4x4(rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16mf2x4(base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16mf2x4(rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16m1x4(base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16m1x4(rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16m2x4(base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16m2x4(rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32mf2x4(base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32mf2x4(rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return
__riscv_vloxseg4ei16_v_f32m1x4(base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32m1x4(rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32m2x4(base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32m2x4(rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f64m1x4(base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f64m1x4(rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f64m2x4(base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f64m2x4(rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf8x4(base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf8x4(rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf4x4(base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf4x4(rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf2x4(base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf2x4(rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8m1x4(base, bindex, vl); +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8m1x4(rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8m2x4(base, bindex, vl); +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8m2x4(rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16mf4x4(base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16mf4x4(rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16mf2x4(base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16mf2x4(rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m1x4(base, bindex, vl); +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxseg4ei16_v_i16m1x4(rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m2x4(base, bindex, vl); +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16m2x4(rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32mf2x4(base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32mf2x4(rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m1x4(base, bindex, vl); +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m1x4(rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m2x4(base, bindex, vl); +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m2x4(rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i64m1x4(base, bindex, vl); +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m1x4(rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i64m2x4(base, bindex, vl); +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m2x4(rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf8x4(base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf8x4(rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf4x4(base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf4x4(rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf2x4(base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf2x4(rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m1x4(base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m1x4(rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m2x4(base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m2x4(rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei16_v_u16mf4x4(base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf4x4(rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16mf2x4(base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf2x4(rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16m1x4(base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m1x4(rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16m2x4(base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m2x4(rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32mf2x4(base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32mf2x4(rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32m1x4(base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32m1x4(rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32m2x4(base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32m2x4(rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u64m1x4(base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u64m1x4(rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u64m2x4(base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u64m2x4(rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16mf4x4_m(mask, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16mf4x4_m(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16mf2x4_m(mask, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16mf2x4_m(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei16_v_f16m1x4_m(mask, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16m1x4_m(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16m2x4_m(mask, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16m2x4_m(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32mf2x4_m(mask, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32mf2x4_m(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32m1x4_m(mask, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32m1x4_m(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32m2x4_m(mask, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32m2x4_m(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f64m1x4_m(mask, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f64m1x4_m(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f64m2x4_m(mask, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f64m2x4_m(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf8x4_m(mask, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf8x4_m(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf4x4_m(mask, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf4x4_m(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf2x4_m(mask, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf2x4_m(vm, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t 
bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8m1x4_m(mask, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8m1x4_m(vm, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8m2x4_m(mask, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8m2x4_m(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16mf4x4_m(mask, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16mf4x4_m(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16mf2x4_m(mask, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16mf2x4_m(vm, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m1x4_m(mask, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16m1x4_m(vm, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m2x4_m(mask, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16m2x4_m(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32mf2x4_m(mask, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32mf2x4_m(vm, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m1x4_m(mask, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m1x4_m(vm, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m2x4_m(mask, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m2x4_m(vm, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i64m1x4_m(mask, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m1x4_m(vm, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg4ei16_v_i64m2x4_m(mask, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m2x4_m(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf8x4_m(mask, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf8x4_m(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf4x4_m(mask, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf4x4_m(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf2x4_m(mask, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf2x4_m(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m1x4_m(mask, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m1x4_m(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m2x4_m(mask, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m2x4_m(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16mf4x4_m(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf4x4_m(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16mf2x4_m(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf2x4_m(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16m1x4_m(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m1x4_m(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16m2x4_m(mask, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m2x4_m(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - 
return __riscv_vloxseg4ei16_v_u32mf2x4_m(mask, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32mf2x4_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32m1x4_m(mask, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32m1x4_m(vm, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32m2x4_m(mask, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32m2x4_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u64m1x4_m(mask, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u64m1x4_m(vm, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u64m2x4_m(mask, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u64m2x4_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei16\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg4ei32.c b/auto-generated/gnu-api-tests/vloxseg4ei32.c
index e9066dc49..28932c980 100644
--- a/auto-generated/gnu-api-tests/vloxseg4ei32.c
+++ b/auto-generated/gnu-api-tests/vloxseg4ei32.c
@@ -6,300 +6,300 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf4x4(base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf4x4(rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf2x4(base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf2x4(rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m1x4(base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m1x4(rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m2x4(base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m2x4(rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return
__riscv_vloxseg4ei32_v_f32mf2x4(base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32mf2x4(rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32m1x4(base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32m1x4(rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32m2x4(base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32m2x4(rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f64m1x4(base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f64m1x4(rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f64m2x4(base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f64m2x4(rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf8x4(base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf8x4(rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf4x4(base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf4x4(rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf2x4(base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf2x4(rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8m1x4(base, bindex, vl); +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8m1x4(rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8m2x4(base, bindex, vl); +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8m2x4(rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16mf4x4(base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16mf4x4(rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16mf2x4(base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vloxseg4ei32_v_i16mf2x4(rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16m1x4(base, bindex, vl); +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16m1x4(rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16m2x4(base, bindex, vl); +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16m2x4(rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32mf2x4(base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32mf2x4(rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32m1x4(base, bindex, vl); +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32m1x4(rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32m2x4(base, bindex, vl); +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32m2x4(rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i64m1x4(base, bindex, vl); +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i64m1x4(rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i64m2x4(base, bindex, vl); +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i64m2x4(rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf8x4(base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf8x4(rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf4x4(base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf4x4(rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf2x4(base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf2x4(rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8m1x4(base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8m1x4(rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8m2x4(base, bindex, 
vl); +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8m2x4(rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16mf4x4(base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16mf4x4(rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16mf2x4(base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16mf2x4(rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16m1x4(base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16m1x4(rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16m2x4(base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16m2x4(rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u32mf2x4(base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u32mf2x4(rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u32m1x4(base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u32m1x4(rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u32m2x4(base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u32m2x4(rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u64m1x4(base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u64m1x4(rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u64m2x4(base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u64m2x4(rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16mf4x4_m(mask, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16mf4x4_m(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16mf2x4_m(mask, base, bindex, vl); +vfloat16mf2x4_t 
test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16mf2x4_m(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16m1x4_m(mask, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16m1x4_m(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16m2x4_m(mask, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16m2x4_m(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32mf2x4_m(mask, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32mf2x4_m(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32m1x4_m(mask, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32m1x4_m(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32m2x4_m(mask, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32m2x4_m(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f64m1x4_m(mask, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f64m1x4_m(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f64m2x4_m(mask, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f64m2x4_m(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf8x4_m(mask, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf8x4_m(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf4x4_m(mask, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf4x4_m(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf2x4_m(mask, 
base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf2x4_m(vm, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8m1x4_m(mask, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8m1x4_m(vm, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8m2x4_m(mask, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8m2x4_m(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16mf4x4_m(mask, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16mf4x4_m(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16mf2x4_m(mask, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16mf2x4_m(vm, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16m1x4_m(mask, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16m1x4_m(vm, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16m2x4_m(mask, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16m2x4_m(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32mf2x4_m(mask, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32mf2x4_m(vm, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32m1x4_m(mask, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32m1x4_m(vm, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32m2x4_m(mask, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32m2x4_m(vm, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i64m1x4_m(mask, base, bindex, vl); +vint64m1x4_t 
test_vloxseg4ei32_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i64m1x4_m(vm, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i64m2x4_m(mask, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i64m2x4_m(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf8x4_m(mask, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf8x4_m(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf4x4_m(mask, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf4x4_m(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf2x4_m(mask, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf2x4_m(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8m1x4_m(mask, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8m1x4_m(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8m2x4_m(mask, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8m2x4_m(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16mf4x4_m(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16mf4x4_m(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16mf2x4_m(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16mf2x4_m(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16m1x4_m(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16m1x4_m(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16m2x4_m(mask, base, bindex, vl); +vuint16m2x4_t 
test_vloxseg4ei32_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16m2x4_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32mf2x4_m(mask, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32mf2x4_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m1x4_m(mask, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m1x4_m(vm, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m2x4_m(mask, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m2x4_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m1x4_m(mask, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m1x4_m(vm, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m2x4_m(mask, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m2x4_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei32\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg4ei64.c b/auto-generated/gnu-api-tests/vloxseg4ei64.c
index 2dcbf5ff3..1e4579ac0 100644
--- a/auto-generated/gnu-api-tests/vloxseg4ei64.c
+++ b/auto-generated/gnu-api-tests/vloxseg4ei64.c
@@ -6,284 +6,284 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16mf4x4(base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16mf4x4(rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16mf2x4(base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16mf2x4(rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16m1x4(base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16m1x4(rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16m2x4(base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m2x4(rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32mf2x4(base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32mf2x4(rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m1x4(base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m1x4(rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m2x4(base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m2x4(rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m1x4(base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m1x4(rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m2x4(base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m2x4(rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf8x4(base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf8x4(rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf4x4(base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf4x4(rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf2x4(base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf2x4(rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8m1x4(base, bindex, vl); +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8m1x4(rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf4x4(base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf4x4(rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf2x4(base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf2x4(rs1, rs2, vl); } -vint16m1x4_t 
test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m1x4(base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m1x4(rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m2x4(base, bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m2x4(rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32mf2x4(base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32mf2x4(rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m1x4(base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m1x4(rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m2x4(base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m2x4(rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m1x4(base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m1x4(rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m2x4(base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m2x4(rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf8x4(base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf8x4(rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf4x4(base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf4x4(rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf2x4(base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf2x4(rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8m1x4(base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8m1x4(rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf4x4(base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const 
uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf4x4(rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf2x4(base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf2x4(rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m1x4(base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m1x4(rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m2x4(base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m2x4(rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32mf2x4(base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32mf2x4(rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m1x4(base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m1x4(rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m2x4(base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m2x4(rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m1x4(base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m1x4(rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m2x4(base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m2x4(rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16mf4x4_m(mask, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16mf4x4_m(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16mf2x4_m(mask, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16mf2x4_m(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16m1x4_m(mask, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t 
vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m1x4_m(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16m2x4_m(mask, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m2x4_m(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32mf2x4_m(mask, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32mf2x4_m(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m1x4_m(mask, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m1x4_m(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m2x4_m(mask, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m2x4_m(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m1x4_m(mask, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m1x4_m(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m2x4_m(mask, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m2x4_m(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf8x4_m(mask, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf8x4_m(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf4x4_m(mask, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf4x4_m(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf2x4_m(mask, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf2x4_m(vm, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8m1x4_m(mask, base, bindex, vl); +vint8m1x4_t 
test_vloxseg4ei64_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8m1x4_m(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf4x4_m(mask, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf4x4_m(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf2x4_m(mask, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf2x4_m(vm, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m1x4_m(mask, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m1x4_m(vm, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m2x4_m(mask, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m2x4_m(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32mf2x4_m(mask, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32mf2x4_m(vm, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m1x4_m(mask, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m1x4_m(vm, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m2x4_m(mask, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m2x4_m(vm, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m1x4_m(mask, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m1x4_m(vm, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m2x4_m(mask, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m2x4_m(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf8x4_m(mask, base, bindex, vl); +vuint8mf8x4_t 
test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf8x4_m(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf4x4_m(mask, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf4x4_m(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf2x4_m(mask, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf2x4_m(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8m1x4_m(mask, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8m1x4_m(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf4x4_m(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf4x4_m(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf2x4_m(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf2x4_m(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m1x4_m(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m1x4_m(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m2x4_m(mask, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m2x4_m(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32mf2x4_m(mask, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32mf2x4_m(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m1x4_m(mask, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m1x4_m(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m2x4_m(mask, base, bindex, vl); 
+vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m2x4_m(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m1x4_m(mask, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m1x4_m(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m2x4_m(mask, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m2x4_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg4ei8.c b/auto-generated/gnu-api-tests/vloxseg4ei8.c index 5d16b40f5..7a6743ea2 100644 --- a/auto-generated/gnu-api-tests/vloxseg4ei8.c +++ b/auto-generated/gnu-api-tests/vloxseg4ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16mf4x4(base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16mf4x4(rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16mf2x4(base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16mf2x4(rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16m1x4(base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16m1x4(rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16m2x4(base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16m2x4(rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32mf2x4(base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32mf2x4(rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32m1x4(base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32m1x4(rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32m2x4(base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32m2x4(rs1, rs2, vl); } 
-vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f64m1x4(base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f64m1x4(rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f64m2x4(base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f64m2x4(rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf8x4(base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf8x4(rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf4x4(base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf4x4(rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf2x4(base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf2x4(rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8m1x4(base, bindex, vl); +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8m1x4(rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8m2x4(base, bindex, vl); +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8m2x4(rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16mf4x4(base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16mf4x4(rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16mf2x4(base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16mf2x4(rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16m1x4(base, bindex, vl); +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16m1x4(rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16m2x4(base, bindex, vl); +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16m2x4(rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32mf2x4(base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + 
return __riscv_vloxseg4ei8_v_i32mf2x4(rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32m1x4(base, bindex, vl); +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i32m1x4(rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32m2x4(base, bindex, vl); +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i32m2x4(rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i64m1x4(base, bindex, vl); +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i64m1x4(rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i64m2x4(base, bindex, vl); +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i64m2x4(rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf8x4(base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf8x4(rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf4x4(base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf4x4(rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf2x4(base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf2x4(rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8m1x4(base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8m1x4(rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8m2x4(base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8m2x4(rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16mf4x4(base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16mf4x4(rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16mf2x4(base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16mf2x4(rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16m1x4(base, bindex, vl); +vuint16m1x4_t 
test_vloxseg4ei8_v_u16m1x4(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16m1x4(rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16m2x4(base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16m2x4(rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32mf2x4(base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32mf2x4(rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m1x4(base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32m1x4(rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m2x4(base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32m2x4(rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m1x4(base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u64m1x4(rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m2x4(base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u64m2x4(rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16mf4x4_m(mask, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16mf4x4_m(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16mf2x4_m(mask, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16mf2x4_m(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16m1x4_m(mask, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16m1x4_m(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16m2x4_m(mask, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16m2x4_m(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei8_v_f32mf2x4_m(mask, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32mf2x4_m(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32m1x4_m(mask, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32m1x4_m(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32m2x4_m(mask, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32m2x4_m(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f64m1x4_m(mask, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f64m1x4_m(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f64m2x4_m(mask, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f64m2x4_m(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf8x4_m(mask, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf8x4_m(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf4x4_m(mask, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf4x4_m(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf2x4_m(mask, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf2x4_m(vm, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8m1x4_m(mask, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8m1x4_m(vm, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8m2x4_m(mask, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8m2x4_m(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16mf4x4_m(mask, base, bindex, vl); 
+vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16mf4x4_m(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16mf2x4_m(mask, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16mf2x4_m(vm, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16m1x4_m(mask, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16m1x4_m(vm, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16m2x4_m(mask, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16m2x4_m(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32mf2x4_m(mask, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i32mf2x4_m(vm, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32m1x4_m(mask, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i32m1x4_m(vm, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32m2x4_m(mask, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i32m2x4_m(vm, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i64m1x4_m(mask, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i64m1x4_m(vm, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i64m2x4_m(mask, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i64m2x4_m(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf8x4_m(mask, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf8x4_m(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf4x4_m(mask, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t vm, 
const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf4x4_m(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf2x4_m(mask, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf2x4_m(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8m1x4_m(mask, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8m1x4_m(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8m2x4_m(mask, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8m2x4_m(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16mf4x4_m(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16mf4x4_m(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16mf2x4_m(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16mf2x4_m(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16m1x4_m(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16m1x4_m(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16m2x4_m(mask, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16m2x4_m(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32mf2x4_m(mask, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32mf2x4_m(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m1x4_m(mask, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32m1x4_m(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m2x4_m(mask, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t 
vl) { + return __riscv_vloxseg4ei8_v_u32m2x4_m(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m1x4_m(mask, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u64m1x4_m(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m2x4_m(mask, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u64m2x4_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg5ei16.c b/auto-generated/gnu-api-tests/vloxseg5ei16.c index 237acf802..5f3c2c78b 100644 --- a/auto-generated/gnu-api-tests/vloxseg5ei16.c +++ b/auto-generated/gnu-api-tests/vloxseg5ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf4x5(base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf4x5(rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf2x5(base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf2x5(rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16m1x5(base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16m1x5(rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32mf2x5(base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32mf2x5(rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32m1x5(base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32m1x5(rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f64m1x5(base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f64m1x5(rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf8x5(base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf8x5(rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, 
size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf4x5(base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf4x5(rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf2x5(base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf2x5(rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8m1x5(base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8m1x5(rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf4x5(base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf4x5(rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf2x5(base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf2x5(rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16m1x5(base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16m1x5(rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32mf2x5(base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32mf2x5(rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32m1x5(base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32m1x5(rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i64m1x5(base, bindex, vl); +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i64m1x5(rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf8x5(base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf8x5(rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf4x5(base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf4x5(rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf2x5(base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxseg5ei16_v_u8mf2x5(rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8m1x5(base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8m1x5(rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf4x5(base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf4x5(rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf2x5(base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf2x5(rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16m1x5(base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16m1x5(rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32mf2x5(base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32mf2x5(rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32m1x5(base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32m1x5(rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u64m1x5(base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u64m1x5(rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf4x5_m(mask, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf4x5_m(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf2x5_m(mask, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf2x5_m(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16m1x5_m(mask, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16m1x5_m(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32mf2x5_m(mask, base, bindex, vl); +vfloat32mf2x5_t 
test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32mf2x5_m(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32m1x5_m(mask, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32m1x5_m(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f64m1x5_m(mask, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f64m1x5_m(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf8x5_m(mask, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf8x5_m(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf4x5_m(mask, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf4x5_m(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf2x5_m(mask, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf2x5_m(vm, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8m1x5_m(mask, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8m1x5_m(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf4x5_m(mask, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf4x5_m(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf2x5_m(mask, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf2x5_m(vm, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16m1x5_m(mask, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16m1x5_m(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32mf2x5_m(mask, base, bindex, vl); +vint32mf2x5_t 
test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32mf2x5_m(vm, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32m1x5_m(mask, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32m1x5_m(vm, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i64m1x5_m(mask, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i64m1x5_m(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf8x5_m(mask, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf8x5_m(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf4x5_m(mask, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf4x5_m(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf2x5_m(mask, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf2x5_m(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8m1x5_m(mask, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8m1x5_m(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf4x5_m(mask, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf4x5_m(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf2x5_m(mask, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf2x5_m(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16m1x5_m(mask, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16m1x5_m(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32mf2x5_m(mask, base, bindex, vl); 
+vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_u32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_u32m1x5_m(mask, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_u32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_u64m1x5_m(mask, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_u64m1x5_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg5ei32.c b/auto-generated/gnu-api-tests/vloxseg5ei32.c
index bf1bc6e89..a20abf063 100644
--- a/auto-generated/gnu-api-tests/vloxseg5ei32.c
+++ b/auto-generated/gnu-api-tests/vloxseg5ei32.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f16mf4x5(base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f16mf4x5(rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f16mf2x5(base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f16mf2x5(rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f16m1x5(base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f16m1x5(rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f32mf2x5(base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f32mf2x5(rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f32m1x5(base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f32m1x5(rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f64m1x5(base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f64m1x5(rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8mf8x5(base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8mf8x5(rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8mf4x5(base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8mf4x5(rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8mf2x5(base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8mf2x5(rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8m1x5(base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8m1x5(rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i16mf4x5(base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i16mf4x5(rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i16mf2x5(base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i16mf2x5(rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i16m1x5(base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i16m1x5(rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i32mf2x5(base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i32mf2x5(rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i32m1x5(base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i32m1x5(rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i64m1x5(base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i64m1x5(rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8mf8x5(base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8mf8x5(rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8mf4x5(base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8mf4x5(rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8mf2x5(base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8mf2x5(rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8m1x5(base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8m1x5(rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u16mf4x5(base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u16mf4x5(rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u16mf2x5(base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u16mf2x5(rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u16m1x5(base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u16m1x5(rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u32mf2x5(base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u32mf2x5(rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u32m1x5(base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u32m1x5(rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u64m1x5(base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u64m1x5(rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f16mf4x5_m(mask, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f16mf2x5_m(mask, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f16m1x5_m(mask, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f32mf2x5_m(mask, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f32m1x5_m(mask, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_f64m1x5_m(mask, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_f64m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8mf8x5_m(mask, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8mf8x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8mf4x5_m(mask, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8mf2x5_m(mask, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i8m1x5_m(mask, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i8m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i16mf4x5_m(mask, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i16mf2x5_m(mask, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i16m1x5_m(mask, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i32mf2x5_m(mask, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i32m1x5_m(mask, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_i64m1x5_m(mask, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_i64m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8mf8x5_m(mask, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8mf8x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8mf4x5_m(mask, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8mf2x5_m(mask, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u8m1x5_m(mask, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u8m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u16mf4x5_m(mask, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u16mf2x5_m(mask, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u16m1x5_m(mask, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u32mf2x5_m(mask, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u32m1x5_m(mask, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_v_u64m1x5_m(mask, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_v_u64m1x5_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg5ei64.c b/auto-generated/gnu-api-tests/vloxseg5ei64.c
index 95a853538..9c753ed66 100644
--- a/auto-generated/gnu-api-tests/vloxseg5ei64.c
+++ b/auto-generated/gnu-api-tests/vloxseg5ei64.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf4x5(base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf4x5(rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf2x5(base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf2x5(rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16m1x5(base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16m1x5(rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32mf2x5(base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32mf2x5(rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32m1x5(base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32m1x5(rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f64m1x5(base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f64m1x5(rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf8x5(base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf8x5(rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf4x5(base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf4x5(rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf2x5(base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf2x5(rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8m1x5(base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8m1x5(rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf4x5(base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf4x5(rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf2x5(base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf2x5(rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16m1x5(base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16m1x5(rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32mf2x5(base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32mf2x5(rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32m1x5(base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32m1x5(rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i64m1x5(base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i64m1x5(rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf8x5(base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf8x5(rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf4x5(base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf4x5(rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf2x5(base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf2x5(rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8m1x5(base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8m1x5(rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf4x5(base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf4x5(rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf2x5(base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf2x5(rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16m1x5(base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16m1x5(rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32mf2x5(base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32mf2x5(rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32m1x5(base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32m1x5(rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u64m1x5(base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u64m1x5(rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf4x5_m(mask, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf2x5_m(mask, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16m1x5_m(mask, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32mf2x5_m(mask, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32m1x5_m(mask, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f64m1x5_m(mask, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f64m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf8x5_m(mask, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf8x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf4x5_m(mask, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf2x5_m(mask, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8m1x5_m(mask, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf4x5_m(mask, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf2x5_m(mask, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16m1x5_m(mask, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32mf2x5_m(mask, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32m1x5_m(mask, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i64m1x5_m(mask, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i64m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf8x5_m(mask, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf8x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf4x5_m(mask, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf2x5_m(mask, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8m1x5_m(mask, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf4x5_m(mask, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf2x5_m(mask, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16m1x5_m(mask, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32mf2x5_m(mask, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32m1x5_m(mask, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u64m1x5_m(mask, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u64m1x5_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg5ei8.c b/auto-generated/gnu-api-tests/vloxseg5ei8.c
index ac6d09c9f..d0d9c651a 100644
--- a/auto-generated/gnu-api-tests/vloxseg5ei8.c
+++ b/auto-generated/gnu-api-tests/vloxseg5ei8.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf4x5(base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf4x5(rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf2x5(base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf2x5(rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16m1x5(base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16m1x5(rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32mf2x5(base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32mf2x5(rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32m1x5(base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32m1x5(rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f64m1x5(base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f64m1x5(rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf8x5(base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf8x5(rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf4x5(base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf4x5(rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf2x5(base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf2x5(rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8m1x5(base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8m1x5(rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf4x5(base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf4x5(rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf2x5(base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf2x5(rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16m1x5(base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16m1x5(rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32mf2x5(base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32mf2x5(rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32m1x5(base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32m1x5(rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i64m1x5(base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i64m1x5(rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf8x5(base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf8x5(rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf4x5(base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf4x5(rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf2x5(base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf2x5(rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8m1x5(base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8m1x5(rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf4x5(base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf4x5(rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf2x5(base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf2x5(rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16m1x5(base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16m1x5(rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32mf2x5(base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32mf2x5(rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32m1x5(base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32m1x5(rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u64m1x5(base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u64m1x5(rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf4x5_m(mask, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf2x5_m(mask, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16m1x5_m(mask, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32mf2x5_m(mask, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32m1x5_m(mask, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f64m1x5_m(mask, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f64m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf8x5_m(mask, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf8x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf4x5_m(mask, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf2x5_m(mask, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8m1x5_m(mask, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf4x5_m(mask, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf2x5_m(mask, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16m1x5_m(mask, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32mf2x5_m(mask, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32m1x5_m(mask, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i64m1x5_m(mask, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i64m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf8x5_m(mask, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf8x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf4x5_m(mask, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf2x5_m(mask, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8m1x5_m(mask, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf4x5_m(mask, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf4x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf2x5_m(mask, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16m1x5_m(mask, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32mf2x5_m(mask, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32mf2x5_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32m1x5_m(mask, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32m1x5_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u64m1x5_m(mask, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u64m1x5_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg6ei16.c b/auto-generated/gnu-api-tests/vloxseg6ei16.c
index 649513934..606c74241 100644
--- a/auto-generated/gnu-api-tests/vloxseg6ei16.c
+++ b/auto-generated/gnu-api-tests/vloxseg6ei16.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_f16mf4x6(base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_f16mf4x6(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_f16mf2x6(base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_f16mf2x6(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_f16m1x6(base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_f16m1x6(rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_f32mf2x6(base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_f32mf2x6(rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_f32m1x6(base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_f32m1x6(rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_f64m1x6(base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_f64m1x6(rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i8mf8x6(base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i8mf8x6(rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i8mf4x6(base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i8mf4x6(rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i8mf2x6(base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i8mf2x6(rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i8m1x6(base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i8m1x6(rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i16mf4x6(base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i16mf4x6(rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i16mf2x6(base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i16mf2x6(rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i16m1x6(base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i16m1x6(rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i32mf2x6(base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i32mf2x6(rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i32m1x6(base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i32m1x6(rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_i64m1x6(base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_i64m1x6(rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_u8mf8x6(base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_u8mf8x6(rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_u8mf4x6(base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_u8mf4x6(rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_v_u8mf2x6(base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_u8mf2x6(rs1, rs2, vl);
__riscv_vloxseg6ei16_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8m1x6(base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf4x6(base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf2x6(base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16m1x6(base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32mf2x6(base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32m1x6(base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u64m1x6(base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u64m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf4x6_m(mask, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf2x6_m(mask, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16m1x6_m(mask, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16m1x6_m(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32mf2x6_m(mask, base, bindex, vl); +vfloat32mf2x6_t 
test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32mf2x6_m(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32m1x6_m(mask, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32m1x6_m(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f64m1x6_m(mask, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f64m1x6_m(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf8x6_m(mask, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf8x6_m(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf4x6_m(mask, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf4x6_m(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf2x6_m(mask, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf2x6_m(vm, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8m1x6_m(mask, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8m1x6_m(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf4x6_m(mask, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf4x6_m(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf2x6_m(mask, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf2x6_m(vm, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16m1x6_m(mask, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16m1x6_m(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32mf2x6_m(mask, base, bindex, vl); +vint32mf2x6_t 
test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32mf2x6_m(vm, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32m1x6_m(mask, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32m1x6_m(vm, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i64m1x6_m(mask, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i64m1x6_m(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf8x6_m(mask, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf8x6_m(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf4x6_m(mask, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf4x6_m(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf2x6_m(mask, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf2x6_m(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8m1x6_m(mask, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8m1x6_m(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf4x6_m(mask, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf4x6_m(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf2x6_m(mask, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf2x6_m(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16m1x6_m(mask, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16m1x6_m(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32mf2x6_m(mask, base, bindex, vl); 
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32mf2x6_m(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32m1x6_m(mask, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32m1x6_m(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u64m1x6_m(mask, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u64m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg6ei32.c b/auto-generated/gnu-api-tests/vloxseg6ei32.c index 7f5512408..d83d0f738 100644 --- a/auto-generated/gnu-api-tests/vloxseg6ei32.c +++ b/auto-generated/gnu-api-tests/vloxseg6ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf4x6(base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf4x6(rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf2x6(base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf2x6(rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16m1x6(base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16m1x6(rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32mf2x6(base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32mf2x6(rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32m1x6(base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32m1x6(rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f64m1x6(base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f64m1x6(rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf8x6(base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei32_v_i8mf8x6(rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf4x6(base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf4x6(rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf2x6(base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf2x6(rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8m1x6(base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8m1x6(rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf4x6(base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf4x6(rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf2x6(base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf2x6(rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16m1x6(base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16m1x6(rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32mf2x6(base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32mf2x6(rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32m1x6(base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32m1x6(rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i64m1x6(base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i64m1x6(rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf8x6(base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf8x6(rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf4x6(base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf4x6(rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf2x6(base, 
bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8m1x6(base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf4x6(base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf2x6(base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16m1x6(base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32mf2x6(base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32m1x6(base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u64m1x6(base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u64m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf4x6_m(mask, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf2x6_m(mask, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16m1x6_m(mask, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16m1x6_m(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg6ei32_v_f32mf2x6_m(mask, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32mf2x6_m(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32m1x6_m(mask, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32m1x6_m(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f64m1x6_m(mask, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f64m1x6_m(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf8x6_m(mask, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf8x6_m(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf4x6_m(mask, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf4x6_m(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf2x6_m(mask, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf2x6_m(vm, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8m1x6_m(mask, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8m1x6_m(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf4x6_m(mask, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf4x6_m(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf2x6_m(mask, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf2x6_m(vm, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16m1x6_m(mask, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16m1x6_m(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg6ei32_v_i32mf2x6_m(mask, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32mf2x6_m(vm, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32m1x6_m(mask, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32m1x6_m(vm, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i64m1x6_m(mask, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i64m1x6_m(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf8x6_m(mask, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf8x6_m(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf4x6_m(mask, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf4x6_m(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf2x6_m(mask, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf2x6_m(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8m1x6_m(mask, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8m1x6_m(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf4x6_m(mask, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf4x6_m(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf2x6_m(mask, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf2x6_m(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16m1x6_m(mask, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16m1x6_m(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t 
vl) { - return __riscv_vloxseg6ei32_v_u32mf2x6_m(mask, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32mf2x6_m(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32m1x6_m(mask, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32m1x6_m(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u64m1x6_m(mask, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u64m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg6ei64.c b/auto-generated/gnu-api-tests/vloxseg6ei64.c index 153735242..58f961cb1 100644 --- a/auto-generated/gnu-api-tests/vloxseg6ei64.c +++ b/auto-generated/gnu-api-tests/vloxseg6ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf4x6(base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf4x6(rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf2x6(base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf2x6(rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16m1x6(base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16m1x6(rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32mf2x6(base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32mf2x6(rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32m1x6(base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32m1x6(rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f64m1x6(base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f64m1x6(rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf8x6(base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const 
int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf8x6(rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf4x6(base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf4x6(rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf2x6(base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf2x6(rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8m1x6(base, bindex, vl); +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8m1x6(rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf4x6(base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf4x6(rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf2x6(base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf2x6(rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16m1x6(base, bindex, vl); +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16m1x6(rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32mf2x6(base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32mf2x6(rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32m1x6(base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32m1x6(rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i64m1x6(base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i64m1x6(rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf8x6(base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf8x6(rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf4x6(base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf4x6(rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - 
return __riscv_vloxseg6ei64_v_u8mf2x6(base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8m1x6(base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf4x6(base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf2x6(base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16m1x6(base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32mf2x6(base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32m1x6(base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u64m1x6(base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u64m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf4x6_m(mask, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf2x6_m(mask, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16m1x6_m(mask, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16m1x6_m(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, 
vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32mf2x6_m(mask, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32mf2x6_m(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32m1x6_m(mask, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32m1x6_m(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f64m1x6_m(mask, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f64m1x6_m(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf8x6_m(mask, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf8x6_m(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf4x6_m(mask, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf4x6_m(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf2x6_m(mask, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf2x6_m(vm, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8m1x6_m(mask, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8m1x6_m(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf4x6_m(mask, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf4x6_m(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf2x6_m(mask, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf2x6_m(vm, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16m1x6_m(mask, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16m1x6_m(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t 
bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32mf2x6_m(mask, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32mf2x6_m(vm, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32m1x6_m(mask, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32m1x6_m(vm, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i64m1x6_m(mask, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i64m1x6_m(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf8x6_m(mask, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf8x6_m(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf4x6_m(mask, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf4x6_m(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf2x6_m(mask, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf2x6_m(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8m1x6_m(mask, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8m1x6_m(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf4x6_m(mask, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf4x6_m(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf2x6_m(mask, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf2x6_m(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16m1x6_m(mask, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16m1x6_m(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t 
bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32mf2x6_m(mask, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32mf2x6_m(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32m1x6_m(mask, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32m1x6_m(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u64m1x6_m(mask, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u64m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg6ei8.c b/auto-generated/gnu-api-tests/vloxseg6ei8.c index f6582f3b1..dc2123c9d 100644 --- a/auto-generated/gnu-api-tests/vloxseg6ei8.c +++ b/auto-generated/gnu-api-tests/vloxseg6ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf4x6(base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf4x6(rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf2x6(base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf2x6(rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16m1x6(base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16m1x6(rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32mf2x6(base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32mf2x6(rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32m1x6(base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32m1x6(rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f64m1x6(base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f64m1x6(rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf8x6(base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *rs1, 
vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf8x6(rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf4x6(base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf4x6(rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf2x6(base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf2x6(rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8m1x6(base, bindex, vl); +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8m1x6(rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf4x6(base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf4x6(rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf2x6(base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf2x6(rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16m1x6(base, bindex, vl); +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16m1x6(rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32mf2x6(base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32mf2x6(rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32m1x6(base, bindex, vl); +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32m1x6(rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i64m1x6(base, bindex, vl); +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i64m1x6(rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf8x6(base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf8x6(rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf4x6(base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf4x6(rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf2x6(base, bindex, vl); 
+vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8m1x6(base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf4x6(base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf2x6(base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16m1x6(base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32mf2x6(base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32m1x6(base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u32m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u64m1x6(base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u64m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf4x6_m(mask, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf2x6_m(mask, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16m1x6_m(mask, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16m1x6_m(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32mf2x6_m(mask, base, bindex, vl); 
+vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f32mf2x6_m(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f32m1x6_m(mask, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f32m1x6_m(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f64m1x6_m(mask, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f64m1x6_m(vm, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i8mf8x6_m(mask, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i8mf8x6_m(vm, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i8mf4x6_m(mask, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i8mf4x6_m(vm, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i8mf2x6_m(mask, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i8mf2x6_m(vm, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i8m1x6_m(mask, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i8m1x6_m(vm, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i16mf4x6_m(mask, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i16mf4x6_m(vm, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i16mf2x6_m(mask, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i16mf2x6_m(vm, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i16m1x6_m(mask, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i16m1x6_m(vm, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i32mf2x6_m(mask, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i32mf2x6_m(vm, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i32m1x6_m(mask, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i32m1x6_m(vm, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i64m1x6_m(mask, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_i64m1x6_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u8mf8x6_m(mask, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u8mf8x6_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u8mf4x6_m(mask, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u8mf4x6_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u8mf2x6_m(mask, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u8mf2x6_m(vm, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u8m1x6_m(mask, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u8m1x6_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u16mf4x6_m(mask, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u16mf4x6_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u16mf2x6_m(mask, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u16mf2x6_m(vm, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u16m1x6_m(mask, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u16m1x6_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u32mf2x6_m(mask, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u32mf2x6_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u32m1x6_m(mask, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u32m1x6_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u64m1x6_m(mask, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u64m1x6_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg7ei16.c b/auto-generated/gnu-api-tests/vloxseg7ei16.c
index ec6042d0e..b5e7444f3 100644
--- a/auto-generated/gnu-api-tests/vloxseg7ei16.c
+++ b/auto-generated/gnu-api-tests/vloxseg7ei16.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf4x7(base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf4x7(rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf2x7(base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf2x7(rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16m1x7(base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16m1x7(rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f32mf2x7(base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f32mf2x7(rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f32m1x7(base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f32m1x7(rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f64m1x7(base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f64m1x7(rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf8x7(base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf8x7(rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf4x7(base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf4x7(rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf2x7(base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf2x7(rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8m1x7(base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8m1x7(rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16mf4x7(base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16mf4x7(rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16mf2x7(base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16mf2x7(rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16m1x7(base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16m1x7(rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i32mf2x7(base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i32mf2x7(rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i32m1x7(base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i32m1x7(rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i64m1x7(base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i64m1x7(rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf8x7(base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf8x7(rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf4x7(base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf4x7(rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf2x7(base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf2x7(rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8m1x7(base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8m1x7(rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16mf4x7(base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u16mf4x7(rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16mf2x7(base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u16mf2x7(rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16m1x7(base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u16m1x7(rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u32mf2x7(base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u32mf2x7(rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u32m1x7(base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u32m1x7(rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u64m1x7(base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u64m1x7(rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf4x7_m(mask, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf2x7_m(mask, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16m1x7_m(mask, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f32mf2x7_m(mask, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f32m1x7_m(mask, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f64m1x7_m(mask, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f64m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf8x7_m(mask, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf8x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf4x7_m(mask, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf2x7_m(mask, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8m1x7_m(mask, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16mf4x7_m(mask, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16mf2x7_m(mask, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16m1x7_m(mask, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i32mf2x7_m(mask, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i32m1x7_m(mask, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i64m1x7_m(mask, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i64m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf8x7_m(mask, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf8x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf4x7_m(mask, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf2x7_m(mask, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei16_v_u8mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8m1x7_m(mask, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16mf4x7_m(mask, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16mf2x7_m(mask, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei16_v_u16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16m1x7_m(mask, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei16_v_u16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u32mf2x7_m(mask, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u32m1x7_m(mask, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei16_v_u32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u64m1x7_m(mask, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei16_v_u64m1x7_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg7ei32.c b/auto-generated/gnu-api-tests/vloxseg7ei32.c
index a1b5f41fd..4fe74c04f 100644
--- a/auto-generated/gnu-api-tests/vloxseg7ei32.c
+++ b/auto-generated/gnu-api-tests/vloxseg7ei32.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16mf4x7(base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16mf4x7(rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16mf2x7(base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16mf2x7(rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16m1x7(base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16m1x7(rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f32mf2x7(base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f32mf2x7(rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f32m1x7(base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f32m1x7(rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f64m1x7(base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f64m1x7(rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf8x7(base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf8x7(rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf4x7(base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf4x7(rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf2x7(base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf2x7(rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8m1x7(base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8m1x7(rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16mf4x7(base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i16mf4x7(rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16mf2x7(base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i16mf2x7(rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16m1x7(base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i16m1x7(rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i32mf2x7(base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i32mf2x7(rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i32m1x7(base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i32m1x7(rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i64m1x7(base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i64m1x7(rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8mf8x7(base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u8mf8x7(rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8mf4x7(base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u8mf4x7(rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8mf2x7(base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u8mf2x7(rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8m1x7(base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u8m1x7(rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u16mf4x7(base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u16mf4x7(rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u16mf2x7(base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u16mf2x7(rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u16m1x7(base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u16m1x7(rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u32mf2x7(base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u32mf2x7(rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u32m1x7(base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u32m1x7(rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u64m1x7(base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u64m1x7(rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16mf4x7_m(mask, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16mf2x7_m(mask, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16m1x7_m(mask, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f32mf2x7_m(mask, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f32m1x7_m(mask, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f64m1x7_m(mask, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f64m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf8x7_m(mask, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf8x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf4x7_m(mask, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf2x7_m(mask, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_i8mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8m1x7_m(mask, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_i8m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16mf4x7_m(mask, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_i16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16mf2x7_m(mask, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_i16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16m1x7_m(mask, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_i16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i32mf2x7_m(mask, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i32m1x7_m(mask, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_i32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i64m1x7_m(mask, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_i64m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8mf8x7_m(mask, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_u8mf8x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8mf4x7_m(mask, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_u8mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8mf2x7_m(mask, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_u8mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u8m1x7_m(mask, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_u8m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u16mf4x7_m(mask, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_u16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u16mf2x7_m(mask, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_u16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u16m1x7_m(mask, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei32_v_u16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u32mf2x7_m(mask, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u32m1x7_m(mask, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_u64m1x7_m(mask, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_u64m1x7_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg7ei64.c b/auto-generated/gnu-api-tests/vloxseg7ei64.c
index a69a16d46..5a62b63bf 100644
--- a/auto-generated/gnu-api-tests/vloxseg7ei64.c
+++ b/auto-generated/gnu-api-tests/vloxseg7ei64.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf4x7(base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf4x7(rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf2x7(base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf2x7(rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16m1x7(base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16m1x7(rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32mf2x7(base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32mf2x7(rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32m1x7(base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32m1x7(rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f64m1x7(base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f64m1x7(rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf8x7(base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf8x7(rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf4x7(base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf4x7(rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf2x7(base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf2x7(rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8m1x7(base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8m1x7(rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf4x7(base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf4x7(rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf2x7(base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf2x7(rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16m1x7(base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16m1x7(rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32mf2x7(base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32mf2x7(rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32m1x7(base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32m1x7(rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i64m1x7(base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i64m1x7(rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf8x7(base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf8x7(rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf4x7(base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf4x7(rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf2x7(base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf2x7(rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8m1x7(base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8m1x7(rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf4x7(base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf4x7(rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf2x7(base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf2x7(rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16m1x7(base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16m1x7(rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32mf2x7(base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32mf2x7(rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32m1x7(base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32m1x7(rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u64m1x7(base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u64m1x7(rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf4x7_m(mask, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf2x7_m(mask, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16m1x7_m(mask, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_f16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32mf2x7_m(mask, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32m1x7_m(mask, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_f32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f64m1x7_m(mask, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_f64m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf8x7_m(mask, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i8mf8x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf4x7_m(mask, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i8mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf2x7_m(mask, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i8mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8m1x7_m(mask, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i8m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf4x7_m(mask, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf2x7_m(mask, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16m1x7_m(mask, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32mf2x7_m(mask, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32m1x7_m(mask, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i64m1x7_m(mask, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_i64m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf8x7_m(mask, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u8mf8x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf4x7_m(mask, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u8mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf2x7_m(mask, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u8mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8m1x7_m(mask, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u8m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf4x7_m(mask, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u16mf4x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf2x7_m(mask, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u16mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16m1x7_m(mask, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u16m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32mf2x7_m(mask, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u32mf2x7_m(vm, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32m1x7_m(mask, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u32m1x7_m(vm, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u64m1x7_m(mask, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
  return __riscv_vloxseg7ei64_v_u64m1x7_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg7ei8.c b/auto-generated/gnu-api-tests/vloxseg7ei8.c
index da72388c8..9ab55931b 100644
--- a/auto-generated/gnu-api-tests/vloxseg7ei8.c
+++ b/auto-generated/gnu-api-tests/vloxseg7ei8.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf4x7(base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf4x7(rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf2x7(base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf2x7(rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16m1x7(base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16m1x7(rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32mf2x7(base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32mf2x7(rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32m1x7(base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32m1x7(rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f64m1x7(base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f64m1x7(rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf8x7(base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8mf8x7(rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i8mf4x7(base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8mf4x7(rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i8mf2x7(base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8mf2x7(rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i8m1x7(base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8m1x7(rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i16mf4x7(base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i16mf4x7(rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i16mf2x7(base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i16mf2x7(rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i16m1x7(base, bindex, vl); +vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i16m1x7(rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i32mf2x7(base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i32mf2x7(rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i32m1x7(base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i32m1x7(rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i64m1x7(base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i64m1x7(rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8mf8x7(base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8mf8x7(rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8mf4x7(base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8mf4x7(rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8mf2x7(base, bindex, vl); 
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8mf2x7(rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8m1x7(base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8m1x7(rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16mf4x7(base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16mf4x7(rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16mf2x7(base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16mf2x7(rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16m1x7(base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16m1x7(rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u32mf2x7(base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u32mf2x7(rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u32m1x7(base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u32m1x7(rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u64m1x7(base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u64m1x7(rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_f16mf4x7_m(mask, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_f16mf4x7_m(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_f16mf2x7_m(mask, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_f16mf2x7_m(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_f16m1x7_m(mask, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_f16m1x7_m(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_f32mf2x7_m(mask, base, bindex, vl); 
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_f32mf2x7_m(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_f32m1x7_m(mask, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_f32m1x7_m(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_f64m1x7_m(mask, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_f64m1x7_m(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i8mf8x7_m(mask, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8mf8x7_m(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i8mf4x7_m(mask, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8mf4x7_m(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i8mf2x7_m(mask, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8mf2x7_m(vm, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i8m1x7_m(mask, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i8m1x7_m(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i16mf4x7_m(mask, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i16mf4x7_m(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i16mf2x7_m(mask, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i16mf2x7_m(vm, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i16m1x7_m(mask, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i16m1x7_m(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i32mf2x7_m(mask, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t 
vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i32mf2x7_m(vm, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i32m1x7_m(mask, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i32m1x7_m(vm, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_i64m1x7_m(mask, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_i64m1x7_m(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8mf8x7_m(mask, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8mf8x7_m(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8mf4x7_m(mask, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8mf4x7_m(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8mf2x7_m(mask, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8mf2x7_m(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8m1x7_m(mask, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8m1x7_m(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16mf4x7_m(mask, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16mf4x7_m(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16mf2x7_m(mask, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16mf2x7_m(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16m1x7_m(mask, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16m1x7_m(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u32mf2x7_m(mask, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, 
size_t vl) { + return __riscv_vloxseg7ei8_v_u32mf2x7_m(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u32m1x7_m(mask, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u32m1x7_m(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u64m1x7_m(mask, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u64m1x7_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg8ei16.c b/auto-generated/gnu-api-tests/vloxseg8ei16.c index d09b5caf0..748581753 100644 --- a/auto-generated/gnu-api-tests/vloxseg8ei16.c +++ b/auto-generated/gnu-api-tests/vloxseg8ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf4x8(base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf2x8(base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16m1x8(base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16m1x8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32mf2x8(base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32m1x8(base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f64m1x8(base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f64m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf8x8(base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf8x8(rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t 
bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf4x8(base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf4x8(rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf2x8(base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf2x8(rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8m1x8(base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8m1x8(rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf4x8(base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf4x8(rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf2x8(base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf2x8(rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16m1x8(base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16m1x8(rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32mf2x8(base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32mf2x8(rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32m1x8(base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32m1x8(rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i64m1x8(base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i64m1x8(rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf8x8(base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf8x8(rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf4x8(base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf4x8(rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf2x8(base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei16_v_u8mf2x8(rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8m1x8(base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8m1x8(rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf4x8(base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf4x8(rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf2x8(base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf2x8(rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16m1x8(base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16m1x8(rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32mf2x8(base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32mf2x8(rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32m1x8(base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32m1x8(rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u64m1x8(base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u64m1x8(rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf4x8_m(mask, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf4x8_m(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf2x8_m(mask, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf2x8_m(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16m1x8_m(mask, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16m1x8_m(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32mf2x8_m(mask, base, bindex, vl); +vfloat32mf2x8_t 
test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32mf2x8_m(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32m1x8_m(mask, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32m1x8_m(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f64m1x8_m(mask, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f64m1x8_m(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf8x8_m(mask, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf8x8_m(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf4x8_m(mask, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf4x8_m(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf2x8_m(mask, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf2x8_m(vm, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8m1x8_m(mask, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8m1x8_m(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf4x8_m(mask, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf4x8_m(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf2x8_m(mask, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf2x8_m(vm, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16m1x8_m(mask, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16m1x8_m(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32mf2x8_m(mask, base, bindex, vl); +vint32mf2x8_t 
test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32mf2x8_m(vm, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32m1x8_m(mask, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32m1x8_m(vm, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i64m1x8_m(mask, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i64m1x8_m(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf8x8_m(mask, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf8x8_m(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf4x8_m(mask, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf4x8_m(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf2x8_m(mask, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf2x8_m(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8m1x8_m(mask, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8m1x8_m(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf4x8_m(mask, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf4x8_m(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf2x8_m(mask, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf2x8_m(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16m1x8_m(mask, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16m1x8_m(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32mf2x8_m(mask, base, bindex, vl); 
+vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32mf2x8_m(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32m1x8_m(mask, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32m1x8_m(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u64m1x8_m(mask, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u64m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg8ei32.c b/auto-generated/gnu-api-tests/vloxseg8ei32.c index d5e906d81..5a6df43eb 100644 --- a/auto-generated/gnu-api-tests/vloxseg8ei32.c +++ b/auto-generated/gnu-api-tests/vloxseg8ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf4x8(base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf2x8(base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16m1x8(base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16m1x8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32mf2x8(base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32m1x8(base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f64m1x8(base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f64m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf8x8(base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei32_v_i8mf8x8(rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf4x8(base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf4x8(rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf2x8(base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf2x8(rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8m1x8(base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8m1x8(rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf4x8(base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf4x8(rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf2x8(base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf2x8(rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16m1x8(base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16m1x8(rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32mf2x8(base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32mf2x8(rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32m1x8(base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32m1x8(rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i64m1x8(base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i64m1x8(rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf8x8(base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf8x8(rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf4x8(base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf4x8(rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf2x8(base, 
bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf2x8(rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8m1x8(base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8m1x8(rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf4x8(base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf4x8(rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf2x8(base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf2x8(rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16m1x8(base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16m1x8(rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32mf2x8(base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32mf2x8(rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32m1x8(base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32m1x8(rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u64m1x8(base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u64m1x8(rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf4x8_m(mask, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf4x8_m(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf2x8_m(mask, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf2x8_m(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16m1x8_m(mask, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16m1x8_m(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg8ei32_v_f32mf2x8_m(mask, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32mf2x8_m(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32m1x8_m(mask, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32m1x8_m(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f64m1x8_m(mask, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f64m1x8_m(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf8x8_m(mask, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf8x8_m(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf4x8_m(mask, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf4x8_m(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf2x8_m(mask, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf2x8_m(vm, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8m1x8_m(mask, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8m1x8_m(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf4x8_m(mask, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf4x8_m(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf2x8_m(mask, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf2x8_m(vm, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16m1x8_m(mask, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16m1x8_m(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg8ei32_v_i32mf2x8_m(mask, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32mf2x8_m(vm, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32m1x8_m(mask, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32m1x8_m(vm, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i64m1x8_m(mask, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i64m1x8_m(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf8x8_m(mask, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf8x8_m(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf4x8_m(mask, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf4x8_m(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf2x8_m(mask, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf2x8_m(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8m1x8_m(mask, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8m1x8_m(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf4x8_m(mask, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf4x8_m(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf2x8_m(mask, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf2x8_m(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16m1x8_m(mask, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16m1x8_m(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t 
vl) { - return __riscv_vloxseg8ei32_v_u32mf2x8_m(mask, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32mf2x8_m(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32m1x8_m(mask, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32m1x8_m(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u64m1x8_m(mask, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u64m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vloxseg8ei64.c b/auto-generated/gnu-api-tests/vloxseg8ei64.c index 47a05bff0..8038e194f 100644 --- a/auto-generated/gnu-api-tests/vloxseg8ei64.c +++ b/auto-generated/gnu-api-tests/vloxseg8ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16mf4x8(base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16mf2x8(base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16m1x8(base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16m1x8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f32mf2x8(base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f32m1x8(base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f32m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f64m1x8(base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f64m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8mf8x8(base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const 
int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf8x8(rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf4x8(base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf4x8(rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf2x8(base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf2x8(rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8m1x8(base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8m1x8(rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf4x8(base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf4x8(rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf2x8(base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf2x8(rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16m1x8(base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16m1x8(rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32mf2x8(base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32mf2x8(rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32m1x8(base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32m1x8(rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i64m1x8(base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i64m1x8(rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf8x8(base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf8x8(rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf4x8(base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf4x8(rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf2x8(base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf2x8(rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8m1x8(base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8m1x8(rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf4x8(base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf4x8(rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf2x8(base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf2x8(rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16m1x8(base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16m1x8(rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32mf2x8(base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32mf2x8(rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32m1x8(base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32m1x8(rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u64m1x8(base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u64m1x8(rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16mf4x8_m(mask, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16mf4x8_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16mf2x8_m(mask, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16mf2x8_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16m1x8_m(mask, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16m1x8_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f32mf2x8_m(mask, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f32mf2x8_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f32m1x8_m(mask, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f32m1x8_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f64m1x8_m(mask, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f64m1x8_m(vm, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf8x8_m(mask, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf8x8_m(vm, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf4x8_m(mask, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf4x8_m(vm, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf2x8_m(mask, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf2x8_m(vm, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8m1x8_m(mask, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8m1x8_m(vm, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf4x8_m(mask, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf4x8_m(vm, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf2x8_m(mask, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf2x8_m(vm, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16m1x8_m(mask, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16m1x8_m(vm, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32mf2x8_m(mask, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32mf2x8_m(vm, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32m1x8_m(mask, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32m1x8_m(vm, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i64m1x8_m(mask, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i64m1x8_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf8x8_m(mask, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf8x8_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf4x8_m(mask, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf4x8_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf2x8_m(mask, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf2x8_m(vm, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8m1x8_m(mask, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8m1x8_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf4x8_m(mask, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf4x8_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf2x8_m(mask, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf2x8_m(vm, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16m1x8_m(mask, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16m1x8_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32mf2x8_m(mask, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32mf2x8_m(vm, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32m1x8_m(mask, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32m1x8_m(vm, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u64m1x8_m(mask, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u64m1x8_m(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vloxseg8ei8.c b/auto-generated/gnu-api-tests/vloxseg8ei8.c
index 3118793d0..eab000dd5 100644
--- a/auto-generated/gnu-api-tests/vloxseg8ei8.c
+++ b/auto-generated/gnu-api-tests/vloxseg8ei8.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf4x8(base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf4x8(rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf2x8(base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf2x8(rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16m1x8(base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16m1x8(rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32mf2x8(base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32mf2x8(rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32m1x8(base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32m1x8(rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f64m1x8(base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f64m1x8(rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf8x8(base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf8x8(rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf4x8(base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf4x8(rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf2x8(base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf2x8(rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8m1x8(base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8m1x8(rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf4x8(base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf4x8(rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf2x8(base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf2x8(rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16m1x8(base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16m1x8(rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32mf2x8(base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32mf2x8(rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32m1x8(base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32m1x8(rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i64m1x8(base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i64m1x8(rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf8x8(base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf8x8(rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf4x8(base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf4x8(rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf2x8(base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf2x8(rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8m1x8(base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8m1x8(rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf4x8(base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf4x8(rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf2x8(base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf2x8(rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16m1x8(base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16m1x8(rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32mf2x8(base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32mf2x8(rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32m1x8(base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32m1x8(rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u64m1x8(base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u64m1x8(rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf4x8_m(mask, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf4x8_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf2x8_m(mask, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf2x8_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16m1x8_m(mask, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16m1x8_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32mf2x8_m(mask, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32mf2x8_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32m1x8_m(mask, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32m1x8_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f64m1x8_m(mask, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f64m1x8_m(vm, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf8x8_m(mask, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf8x8_m(vm, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf4x8_m(mask, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf4x8_m(vm, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf2x8_m(mask, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf2x8_m(vm, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8m1x8_m(mask, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8m1x8_m(vm, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf4x8_m(mask, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf4x8_m(vm, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf2x8_m(mask, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf2x8_m(vm, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16m1x8_m(mask, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16m1x8_m(vm, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32mf2x8_m(mask, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32mf2x8_m(vm, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32m1x8_m(mask, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32m1x8_m(vm, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i64m1x8_m(mask, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i64m1x8_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf8x8_m(mask, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf8x8_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf4x8_m(mask, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf4x8_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf2x8_m(mask, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf2x8_m(vm, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8m1x8_m(mask, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8m1x8_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf4x8_m(mask, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf4x8_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf2x8_m(mask, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf2x8_m(vm, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16m1x8_m(mask, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16m1x8_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32mf2x8_m(mask, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32mf2x8_m(vm, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32m1x8_m(mask, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32m1x8_m(vm, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u64m1x8_m(mask, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u64m1x8_m(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vlse16.c b/auto-generated/gnu-api-tests/vlse16.c
index 5b0212c8b..ea55ca38e 100644
--- a/auto-generated/gnu-api-tests/vlse16.c
+++ b/auto-generated/gnu-api-tests/vlse16.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vlse16_v_f16mf4(const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16mf4(base, bstride, vl);
+vfloat16mf4_t test_vlse16_v_f16mf4(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16mf4(rs1, rs2, vl);
 }

-vfloat16mf2_t test_vlse16_v_f16mf2(const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16mf2(base, bstride, vl);
+vfloat16mf2_t test_vlse16_v_f16mf2(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16mf2(rs1, rs2, vl);
 }

-vfloat16m1_t test_vlse16_v_f16m1(const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m1(base, bstride, vl);
+vfloat16m1_t test_vlse16_v_f16m1(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m1(rs1, rs2, vl);
 }

-vfloat16m2_t test_vlse16_v_f16m2(const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m2(base, bstride, vl);
+vfloat16m2_t test_vlse16_v_f16m2(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m2(rs1, rs2, vl);
 }

-vfloat16m4_t test_vlse16_v_f16m4(const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m4(base, bstride, vl);
+vfloat16m4_t test_vlse16_v_f16m4(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m4(rs1, rs2, vl);
 }

-vfloat16m8_t test_vlse16_v_f16m8(const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m8(base, bstride, vl);
+vfloat16m8_t test_vlse16_v_f16m8(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m8(rs1, rs2, vl);
 }

-vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16mf4(base, bstride, vl);
+vint16mf4_t test_vlse16_v_i16mf4(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16mf4(rs1, rs2, vl);
 }

-vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16mf2(base, bstride, vl);
+vint16mf2_t test_vlse16_v_i16mf2(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16mf2(rs1, rs2, vl);
 }

-vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m1(base, bstride, vl);
+vint16m1_t test_vlse16_v_i16m1(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m1(rs1, rs2, vl);
 }

-vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m2(base, bstride, vl);
+vint16m2_t test_vlse16_v_i16m2(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m2(rs1, rs2, vl);
 }

-vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m4(base, bstride, vl);
+vint16m4_t test_vlse16_v_i16m4(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m4(rs1, rs2, vl);
 }

-vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m8(base, bstride, vl);
+vint16m8_t test_vlse16_v_i16m8(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m8(rs1, rs2, vl);
 }

-vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16mf4(base, bstride, vl);
+vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16mf4(rs1, rs2, vl);
 }

-vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16mf2(base, bstride, vl);
+vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16mf2(rs1, rs2, vl);
 }

-vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m1(base, bstride, vl);
+vuint16m1_t test_vlse16_v_u16m1(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m1(rs1, rs2, vl);
 }

-vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m2(base, bstride, vl);
+vuint16m2_t test_vlse16_v_u16m2(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m2(rs1, rs2, vl);
 }

-vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m4(base, bstride, vl);
+vuint16m4_t test_vlse16_v_u16m4(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m4(rs1, rs2, vl);
 }

-vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m8(base, bstride, vl);
+vuint16m8_t test_vlse16_v_u16m8(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m8(rs1, rs2, vl);
 }

-vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16mf4_m(mask, base, bstride, vl);
+vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16mf4_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16mf2_m(mask, base, bstride, vl);
+vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16mf2_m(vm, rs1, rs2, vl);
 }

-vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m1_m(mask, base, bstride, vl);
+vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m1_m(vm, rs1, rs2, vl);
 }

-vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m2_m(mask, base, bstride, vl);
+vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m2_m(vm, rs1, rs2, vl);
 }

-vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m4_m(mask, base, bstride, vl);
+vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m4_m(vm, rs1, rs2, vl);
 }

-vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_f16m8_m(mask, base, bstride, vl);
+vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_f16m8_m(vm, rs1, rs2, vl);
 }

-vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16mf4_m(mask, base, bstride, vl);
+vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16mf4_m(vm, rs1, rs2, vl);
 }

-vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16mf2_m(mask, base, bstride, vl);
+vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16mf2_m(vm, rs1, rs2, vl);
 }

-vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m1_m(mask, base, bstride, vl);
+vint16m1_t test_vlse16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m1_m(vm, rs1, rs2, vl);
 }

-vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m2_m(mask, base, bstride, vl);
+vint16m2_t test_vlse16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m2_m(vm, rs1, rs2, vl);
 }

-vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m4_m(mask, base, bstride, vl);
+vint16m4_t test_vlse16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m4_m(vm, rs1, rs2, vl);
 }

-vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_i16m8_m(mask, base, bstride, vl);
+vint16m8_t test_vlse16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_i16m8_m(vm, rs1, rs2, vl);
 }

-vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16mf4_m(mask, base, bstride, vl);
+vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16mf4_m(vm, rs1, rs2, vl);
 }

-vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16mf2_m(mask, base, bstride, vl);
+vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16mf2_m(vm, rs1, rs2, vl);
 }
-vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m1_m(mask, base, bstride, vl);
+vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m1_m(vm, rs1, rs2, vl);
 }

-vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m2_m(mask, base, bstride, vl);
+vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m2_m(vm, rs1, rs2, vl);
 }

-vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m4_m(mask, base, bstride, vl);
+vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m4_m(vm, rs1, rs2, vl);
 }

-vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse16_v_u16m8_m(mask, base, bstride, vl);
+vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_u16m8_m(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse16\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/gnu-api-tests/vlse32.c b/auto-generated/gnu-api-tests/vlse32.c
index b50c84c72..470b6d58b 100644
--- a/auto-generated/gnu-api-tests/vlse32.c
+++ b/auto-generated/gnu-api-tests/vlse32.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2_t test_vlse32_v_f32mf2(const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32mf2(base, bstride, vl);
+vfloat32mf2_t test_vlse32_v_f32mf2(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32mf2(rs1, rs2, vl);
 }

-vfloat32m1_t test_vlse32_v_f32m1(const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m1(base, bstride, vl);
+vfloat32m1_t test_vlse32_v_f32m1(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m1(rs1, rs2, vl);
 }

-vfloat32m2_t test_vlse32_v_f32m2(const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m2(base, bstride, vl);
+vfloat32m2_t test_vlse32_v_f32m2(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m2(rs1, rs2, vl);
 }

-vfloat32m4_t test_vlse32_v_f32m4(const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m4(base, bstride, vl);
+vfloat32m4_t test_vlse32_v_f32m4(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m4(rs1, rs2, vl);
 }

-vfloat32m8_t test_vlse32_v_f32m8(const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m8(base, bstride, vl);
+vfloat32m8_t test_vlse32_v_f32m8(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m8(rs1, rs2, vl);
 }

-vint32mf2_t test_vlse32_v_i32mf2(const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32mf2(base, bstride, vl);
+vint32mf2_t test_vlse32_v_i32mf2(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32mf2(rs1, rs2, vl);
 }

-vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m1(base, bstride, vl);
+vint32m1_t test_vlse32_v_i32m1(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m1(rs1, rs2, vl);
 }

-vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m2(base, bstride, vl);
+vint32m2_t test_vlse32_v_i32m2(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m2(rs1, rs2, vl);
 }

-vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m4(base, bstride, vl);
+vint32m4_t test_vlse32_v_i32m4(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m4(rs1, rs2, vl);
 }

-vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m8(base, bstride, vl);
+vint32m8_t test_vlse32_v_i32m8(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m8(rs1, rs2, vl);
 }

-vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32mf2(base, bstride, vl);
+vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32mf2(rs1, rs2, vl);
 }

-vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m1(base, bstride, vl);
+vuint32m1_t test_vlse32_v_u32m1(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m1(rs1, rs2, vl);
 }

-vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m2(base, bstride, vl);
+vuint32m2_t test_vlse32_v_u32m2(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m2(rs1, rs2, vl);
 }

-vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m4(base, bstride, vl);
+vuint32m4_t test_vlse32_v_u32m4(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m4(rs1, rs2, vl);
 }

-vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m8(base, bstride, vl);
+vuint32m8_t test_vlse32_v_u32m8(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m8(rs1, rs2, vl);
 }

-vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32mf2_m(mask, base, bstride, vl);
+vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32mf2_m(vm, rs1, rs2, vl);
 }

-vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m1_m(mask, base, bstride, vl);
+vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m1_m(vm, rs1, rs2, vl);
 }

-vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m2_m(mask, base, bstride, vl);
+vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m2_m(vm, rs1, rs2, vl);
 }

-vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m4_m(mask, base, bstride, vl);
+vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m4_m(vm, rs1, rs2, vl);
 }

-vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_f32m8_m(mask, base, bstride, vl);
+vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_f32m8_m(vm, rs1, rs2, vl);
 }

-vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32mf2_m(mask, base, bstride, vl);
+vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32mf2_m(vm, rs1, rs2, vl);
 }

-vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m1_m(mask, base, bstride, vl);
+vint32m1_t test_vlse32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m1_m(vm, rs1, rs2, vl);
 }

-vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m2_m(mask, base, bstride, vl);
+vint32m2_t test_vlse32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m2_m(vm, rs1, rs2, vl);
 }

-vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m4_m(mask, base, bstride, vl);
+vint32m4_t test_vlse32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m4_m(vm, rs1, rs2, vl);
 }

-vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_i32m8_m(mask, base, bstride, vl);
+vint32m8_t test_vlse32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_i32m8_m(vm, rs1, rs2, vl);
 }

-vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32mf2_m(mask, base, bstride, vl);
+vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32mf2_m(vm, rs1, rs2, vl);
 }

-vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m1_m(mask, base, bstride, vl);
+vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m1_m(vm, rs1, rs2, vl);
 }

-vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m2_m(mask, base, bstride, vl);
+vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m2_m(vm, rs1, rs2, vl);
 }

-vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m4_m(mask, base, bstride, vl);
+vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m4_m(vm, rs1, rs2, vl);
 }

-vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse32_v_u32m8_m(mask, base, bstride, vl);
+vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse32_v_u32m8_m(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse32\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vlse64.c b/auto-generated/gnu-api-tests/vlse64.c
index 01ba651be..38d19d20d 100644
--- a/auto-generated/gnu-api-tests/vlse64.c
+++ b/auto-generated/gnu-api-tests/vlse64.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1_t test_vlse64_v_f64m1(const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m1(base, bstride, vl);
+vfloat64m1_t test_vlse64_v_f64m1(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m1(rs1, rs2, vl);
 }

-vfloat64m2_t test_vlse64_v_f64m2(const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m2(base, bstride, vl);
+vfloat64m2_t test_vlse64_v_f64m2(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m2(rs1, rs2, vl);
 }

-vfloat64m4_t test_vlse64_v_f64m4(const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m4(base, bstride, vl);
+vfloat64m4_t test_vlse64_v_f64m4(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m4(rs1, rs2, vl);
 }

-vfloat64m8_t test_vlse64_v_f64m8(const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m8(base, bstride, vl);
+vfloat64m8_t test_vlse64_v_f64m8(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m8(rs1, rs2, vl);
 }

-vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m1(base, bstride, vl);
+vint64m1_t test_vlse64_v_i64m1(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m1(rs1, rs2, vl);
 }

-vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m2(base, bstride, vl);
+vint64m2_t test_vlse64_v_i64m2(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m2(rs1, rs2, vl);
 }

-vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m4(base, bstride, vl);
+vint64m4_t test_vlse64_v_i64m4(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m4(rs1, rs2, vl);
 }

-vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m8(base, bstride, vl);
+vint64m8_t test_vlse64_v_i64m8(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m8(rs1, rs2, vl);
 }

-vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m1(base, bstride, vl);
+vuint64m1_t test_vlse64_v_u64m1(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m1(rs1, rs2, vl);
 }

-vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m2(base, bstride, vl);
+vuint64m2_t test_vlse64_v_u64m2(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m2(rs1, rs2, vl);
 }

-vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m4(base, bstride, vl);
+vuint64m4_t test_vlse64_v_u64m4(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m4(rs1, rs2, vl);
 }

-vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m8(base, bstride, vl);
+vuint64m8_t test_vlse64_v_u64m8(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m8(rs1, rs2, vl);
 }

-vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m1_m(mask, base, bstride, vl);
+vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m1_m(vm, rs1, rs2, vl);
 }

-vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m2_m(mask, base, bstride, vl);
+vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m2_m(vm, rs1, rs2, vl);
 }

-vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m4_m(mask, base, bstride, vl);
+vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m4_m(vm, rs1, rs2, vl);
 }

-vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_f64m8_m(mask, base, bstride, vl);
+vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_f64m8_m(vm, rs1, rs2, vl);
 }

-vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m1_m(mask, base, bstride, vl);
+vint64m1_t test_vlse64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m1_m(vm, rs1, rs2, vl);
 }

-vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m2_m(mask, base, bstride, vl);
+vint64m2_t test_vlse64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m2_m(vm, rs1, rs2, vl);
 }

-vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m4_m(mask, base, bstride, vl);
+vint64m4_t test_vlse64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m4_m(vm, rs1, rs2, vl);
 }

-vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_i64m8_m(mask, base, bstride, vl);
+vint64m8_t test_vlse64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_i64m8_m(vm, rs1, rs2, vl);
 }

-vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m1_m(mask, base, bstride, vl);
+vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m1_m(vm, rs1, rs2, vl);
 }

-vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m2_m(mask, base, bstride, vl);
+vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m2_m(vm, rs1, rs2, vl);
 }

-vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m4_m(mask, base, bstride, vl);
+vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m4_m(vm, rs1, rs2, vl);
 }

-vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m8_m(mask, base, bstride, vl);
+vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m8_m(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse64\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vlse8.c b/auto-generated/gnu-api-tests/vlse8.c
index f67f34ef7..345a9a6f7 100644
--- a/auto-generated/gnu-api-tests/vlse8.c
+++ b/auto-generated/gnu-api-tests/vlse8.c
@@ -6,116 +6,116 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf8(base, bstride, vl);
+vint8mf8_t test_vlse8_v_i8mf8(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf8(rs1, rs2, vl);
 }

-vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf4(base, bstride, vl);
+vint8mf4_t test_vlse8_v_i8mf4(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf4(rs1, rs2, vl);
 }

-vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf2(base, bstride, vl);
+vint8mf2_t test_vlse8_v_i8mf2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf2(rs1, rs2, vl);
 }

-vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m1(base, bstride, vl);
+vint8m1_t test_vlse8_v_i8m1(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m1(rs1, rs2, vl);
 }

-vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m2(base, bstride, vl);
+vint8m2_t test_vlse8_v_i8m2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m2(rs1, rs2, vl);
 }

-vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m4(base, bstride, vl);
+vint8m4_t test_vlse8_v_i8m4(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m4(rs1, rs2, vl);
 }

-vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m8(base, bstride, vl);
+vint8m8_t test_vlse8_v_i8m8(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m8(rs1, rs2, vl);
 }

-vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8mf8(base, bstride, vl);
+vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8mf8(rs1, rs2, vl);
 }

-vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8mf4(base, bstride, vl);
+vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8mf4(rs1, rs2, vl);
 }

-vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8mf2(base, bstride, vl);
+vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8mf2(rs1, rs2, vl);
 }

-vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m1(base, bstride, vl);
+vuint8m1_t test_vlse8_v_u8m1(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m1(rs1, rs2, vl);
 }

-vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m2(base, bstride, vl);
+vuint8m2_t test_vlse8_v_u8m2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m2(rs1, rs2, vl);
 }

-vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m4(base, bstride, vl);
+vuint8m4_t test_vlse8_v_u8m4(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m4(rs1, rs2, vl);
 }

-vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m8(base, bstride, vl);
+vuint8m8_t test_vlse8_v_u8m8(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m8(rs1, rs2, vl);
 }

-vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf8_m(mask, base, bstride, vl);
+vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf8_m(vm, rs1, rs2, vl);
 }

-vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf4_m(mask, base, bstride, vl);
+vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf4_m(vm, rs1, rs2, vl);
 }

-vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf2_m(mask, base, bstride, vl);
+vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf2_m(vm, rs1, rs2, vl);
 }

-vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m1_m(mask, base, bstride, vl);
+vint8m1_t test_vlse8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m1_m(vm, rs1, rs2, vl);
 }

-vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m2_m(mask, base, bstride, vl);
+vint8m2_t test_vlse8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m2_m(vm, rs1, rs2, vl);
 }

-vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m4_m(mask, base, bstride, vl);
+vint8m4_t test_vlse8_v_i8m4_m(vbool2_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m4_m(vm, rs1, rs2, vl);
 }

-vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m8_m(mask, base, bstride, vl);
+vint8m8_t test_vlse8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m8_m(vm, rs1, rs2, vl);
 }

-vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8mf8_m(mask, base, bstride, vl);
+vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8mf8_m(vm, rs1, rs2, vl);
 }

-vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8mf4_m(mask, base, bstride, vl);
+vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8mf4_m(vm, rs1, rs2, vl);
 }

-vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8mf2_m(mask, base, bstride, vl);
+vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8mf2_m(vm, rs1, rs2, vl);
 }

-vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m1_m(mask, base, bstride, vl);
+vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m1_m(vm, rs1, rs2, vl);
 }

-vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m2_m(mask, base, bstride, vl);
+vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m2_m(vm, rs1, rs2, vl);
 }

-vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m4_m(mask, base, bstride, vl);
+vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m4_m(vm, rs1, rs2, vl);
 }

-vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m8_m(mask, base, bstride, vl);
+vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m8_m(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse8\.[ivxfswum.]+\s+} 28 } } */
diff --git a/auto-generated/gnu-api-tests/vlseg2e16.c b/auto-generated/gnu-api-tests/vlseg2e16.c
index 87f65ade3..f977e8d02 100644
--- a/auto-generated/gnu-api-tests/vlseg2e16.c
+++ b/auto-generated/gnu-api-tests/vlseg2e16.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2(const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16mf4x2(base, vl);
+vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2(const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16mf4x2(rs1, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2(const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16mf2x2(base, vl);
+vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2(const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16mf2x2(rs1, vl);
 }

-vfloat16m1x2_t test_vlseg2e16_v_f16m1x2(const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m1x2(base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2(const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m1x2(rs1, vl);
 }

-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2(const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m2x2(base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2(const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m2x2(rs1, vl);
 }

-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2(const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m4x2(base, vl);
+vfloat16m4x2_t test_vlseg2e16_v_f16m4x2(const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m4x2(rs1, vl);
 }

-vint16mf4x2_t test_vlseg2e16_v_i16mf4x2(const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16mf4x2(base, vl);
+vint16mf4x2_t test_vlseg2e16_v_i16mf4x2(const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16mf4x2(rs1, vl);
 }

-vint16mf2x2_t test_vlseg2e16_v_i16mf2x2(const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16mf2x2(base, vl);
+vint16mf2x2_t test_vlseg2e16_v_i16mf2x2(const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16mf2x2(rs1, vl);
 }

-vint16m1x2_t test_vlseg2e16_v_i16m1x2(const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16m1x2(base, vl);
+vint16m1x2_t test_vlseg2e16_v_i16m1x2(const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16m1x2(rs1, vl);
 }

-vint16m2x2_t test_vlseg2e16_v_i16m2x2(const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16m2x2(base, vl);
+vint16m2x2_t test_vlseg2e16_v_i16m2x2(const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16m2x2(rs1, vl);
 }

-vint16m4x2_t test_vlseg2e16_v_i16m4x2(const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16m4x2(base, vl);
+vint16m4x2_t test_vlseg2e16_v_i16m4x2(const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16m4x2(rs1, vl);
 }

-vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2(const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16mf4x2(base, vl);
+vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2(const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16mf4x2(rs1, vl);
 }

-vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2(const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16mf2x2(base, vl);
+vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2(const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16mf2x2(rs1, vl);
 }

-vuint16m1x2_t test_vlseg2e16_v_u16m1x2(const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16m1x2(base, vl);
+vuint16m1x2_t test_vlseg2e16_v_u16m1x2(const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16m1x2(rs1, vl);
 }

-vuint16m2x2_t test_vlseg2e16_v_u16m2x2(const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16m2x2(base, vl);
+vuint16m2x2_t test_vlseg2e16_v_u16m2x2(const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16m2x2(rs1, vl);
 }

-vuint16m4x2_t test_vlseg2e16_v_u16m4x2(const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16m4x2(base, vl);
+vuint16m4x2_t test_vlseg2e16_v_u16m4x2(const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16m4x2(rs1, vl);
 }

-vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16mf4x2_m(mask, base, vl);
+vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16mf4x2_m(vm, rs1, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16mf2x2_m(mask, base, vl);
+vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16mf2x2_m(vm, rs1, vl);
 }

-vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m1x2_m(mask, base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m1x2_m(vm, rs1, vl);
 }

-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m2x2_m(mask, base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m2x2_m(vm, rs1, vl);
 }

-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_m(vbool4_t mask, const float16_t *base,
size_t vl) { - return __riscv_vlseg2e16_v_f16m4x2_m(mask, base, vl); +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m4x2_m(vm, rs1, vl); } -vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf4x2_m(mask, base, vl); +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf4x2_m(vm, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf2x2_m(mask, base, vl); +vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf2x2_m(vm, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m1x2_m(mask, base, vl); +vint16m1x2_t test_vlseg2e16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m1x2_m(vm, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m2x2_m(mask, base, vl); +vint16m2x2_t test_vlseg2e16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m2x2_m(vm, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m4x2_m(mask, base, vl); +vint16m4x2_t test_vlseg2e16_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m4x2_m(vm, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf4x2_m(mask, base, vl); +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf4x2_m(vm, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf2x2_m(mask, base, vl); +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf2x2_m(vm, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m1x2_m(mask, base, vl); +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m1x2_m(vm, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m2x2_m(mask, base, vl); +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m2x2_m(vm, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m4x2_m(mask, base, vl); +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m4x2_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg2e16ff.c b/auto-generated/gnu-api-tests/vlseg2e16ff.c index 895975837..f1c2b6647 100644 --- a/auto-generated/gnu-api-tests/vlseg2e16ff.c +++ b/auto-generated/gnu-api-tests/vlseg2e16ff.c @@ -6,124 
+6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf4x2(base, new_vl, vl); +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf4x2(rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf2x2(base, new_vl, vl); +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf2x2(rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m1x2(base, new_vl, vl); +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m1x2(rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m2x2(base, new_vl, vl); +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m2x2(rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m4x2(base, new_vl, vl); +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m4x2(rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf4x2(base, new_vl, vl); +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf4x2(rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf2x2(base, new_vl, vl); +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf2x2(rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m1x2(base, new_vl, vl); +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m1x2(rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m2x2(base, new_vl, vl); +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m2x2(rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m4x2(base, new_vl, vl); +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m4x2(rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf4x2(base, new_vl, vl); +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf4x2(rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2(const uint16_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg2e16ff_v_u16mf2x2(base, new_vl, vl); +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf2x2(rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m1x2(base, new_vl, vl); +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m1x2(rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m2x2(base, new_vl, vl); +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m2x2(rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m4x2(base, new_vl, vl); +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m4x2(rs1, new_vl, vl); } -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf4x2_m(mask, base, new_vl, vl); +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf4x2_m(vm, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf2x2_m(mask, base, new_vl, vl); +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf2x2_m(vm, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m1x2_m(mask, base, new_vl, vl); +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m1x2_m(vm, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m2x2_m(mask, base, new_vl, vl); +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m2x2_m(vm, rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m4x2_m(mask, base, new_vl, vl); +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m4x2_m(vm, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf4x2_m(mask, base, new_vl, vl); +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf4x2_m(vm, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf2x2_m(mask, base, new_vl, vl); +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg2e16ff_v_i16mf2x2_m(vm, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m1x2_m(mask, base, new_vl, vl); +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m1x2_m(vm, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m2x2_m(mask, base, new_vl, vl); +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m2x2_m(vm, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m4x2_m(mask, base, new_vl, vl); +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m4x2_m(vm, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf4x2_m(mask, base, new_vl, vl); +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf4x2_m(vm, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf2x2_m(mask, base, new_vl, vl); +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf2x2_m(vm, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m1x2_m(mask, base, new_vl, vl); +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m1x2_m(vm, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m2x2_m(mask, base, new_vl, vl); +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m2x2_m(vm, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m4x2_m(mask, base, new_vl, vl); +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m4x2_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16ff\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg2e32.c b/auto-generated/gnu-api-tests/vlseg2e32.c index d8f6f32d3..e8aeb3e88 100644 --- a/auto-generated/gnu-api-tests/vlseg2e32.c +++ b/auto-generated/gnu-api-tests/vlseg2e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2(const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32mf2x2(base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2(const float32_t *rs1, size_t vl) { + return 
__riscv_vlseg2e32_v_f32mf2x2(rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2(const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m1x2(base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2(const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m1x2(rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2(const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m2x2(base, vl); +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2(const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m2x2(rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2(const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m4x2(base, vl); +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2(const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m4x2(rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2(const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32mf2x2(base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2(const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32mf2x2(rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2(const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m1x2(base, vl); +vint32m1x2_t test_vlseg2e32_v_i32m1x2(const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m1x2(rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2(const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m2x2(base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2(const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m2x2(rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2(const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m4x2(base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2(const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m4x2(rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2(const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32mf2x2(base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32mf2x2(rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2(const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m1x2(base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m1x2(rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2(const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m2x2(base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m2x2(rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2(const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m4x2(base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m4x2(rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32mf2x2_m(mask, base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32mf2x2_m(vm, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m1x2_m(mask, base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m1x2_m(vm, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m2x2_m(mask, base, vl); +vfloat32m2x2_t 
test_vlseg2e32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m2x2_m(vm, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m4x2_m(mask, base, vl); +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m4x2_m(vm, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32mf2x2_m(mask, base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32mf2x2_m(vm, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m1x2_m(mask, base, vl); +vint32m1x2_t test_vlseg2e32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m1x2_m(vm, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m2x2_m(mask, base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m2x2_m(vm, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m4x2_m(mask, base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m4x2_m(vm, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32mf2x2_m(mask, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32mf2x2_m(vm, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m1x2_m(mask, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m1x2_m(vm, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m2x2_m(mask, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m2x2_m(vm, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m4x2_m(mask, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m4x2_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg2e32ff.c b/auto-generated/gnu-api-tests/vlseg2e32ff.c index 2be7dcc0d..670f2f7b3 100644 --- a/auto-generated/gnu-api-tests/vlseg2e32ff.c +++ b/auto-generated/gnu-api-tests/vlseg2e32ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32mf2x2(base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32mf2x2(rs1, 
new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m1x2(base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m1x2(rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m2x2(base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m2x2(rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m4x2(base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m4x2(rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32mf2x2(base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32mf2x2(rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m1x2(base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m1x2(rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m2x2(base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m2x2(rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m4x2(base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m4x2(rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32mf2x2(base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32mf2x2(rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m1x2(base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m1x2(rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m2x2(base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m2x2(rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m4x2(base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m4x2(rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32mf2x2_m(mask, base, new_vl, vl); +vfloat32mf2x2_t 
test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32mf2x2_m(vm, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m1x2_m(mask, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m1x2_m(vm, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m2x2_m(mask, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m2x2_m(vm, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m4x2_m(mask, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m4x2_m(vm, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32mf2x2_m(mask, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32mf2x2_m(vm, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m1x2_m(mask, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m1x2_m(vm, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m2x2_m(mask, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m2x2_m(vm, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m4x2_m(mask, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m4x2_m(vm, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32mf2x2_m(mask, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32mf2x2_m(vm, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m1x2_m(mask, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m1x2_m(vm, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m2x2_m(mask, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t vm, 
const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m2x2_m(vm, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m4x2_m(mask, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m4x2_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg2e64.c b/auto-generated/gnu-api-tests/vlseg2e64.c index 749efda2b..03f8be812 100644 --- a/auto-generated/gnu-api-tests/vlseg2e64.c +++ b/auto-generated/gnu-api-tests/vlseg2e64.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2(const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m1x2(base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2(const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m1x2(rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2(const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m2x2(base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2(const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m2x2(rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2(const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m4x2(base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2(const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m4x2(rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2(const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m1x2(base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2(const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m1x2(rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2(const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m2x2(base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2(const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m2x2(rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2(const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m4x2(base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2(const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m4x2(rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2(const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m1x2(base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m1x2(rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2(const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m2x2(base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m2x2(rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2(const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m4x2(base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m4x2(rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m1x2_m(mask, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m1x2_m(vm, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, size_t vl) { - 
return __riscv_vlseg2e64_v_f64m2x2_m(mask, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m2x2_m(vm, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m4x2_m(mask, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m4x2_m(vm, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m1x2_m(mask, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m1x2_m(vm, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m2x2_m(mask, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m2x2_m(vm, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m4x2_m(mask, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m4x2_m(vm, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m1x2_m(mask, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m1x2_m(vm, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m2x2_m(mask, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m2x2_m(vm, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m4x2_m(mask, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m4x2_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg2e64ff.c b/auto-generated/gnu-api-tests/vlseg2e64ff.c index 597e19cc3..a101e0a09 100644 --- a/auto-generated/gnu-api-tests/vlseg2e64ff.c +++ b/auto-generated/gnu-api-tests/vlseg2e64ff.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m1x2(base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m1x2(rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m2x2(base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m2x2(rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m4x2(base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2(const 
float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m4x2(rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m1x2(base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m1x2(rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m2x2(base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m2x2(rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m4x2(base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m4x2(rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m1x2(base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m1x2(rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m2x2(base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m2x2(rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m4x2(base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m4x2(rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m1x2_m(mask, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m1x2_m(vm, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m2x2_m(mask, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m2x2_m(vm, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m4x2_m(mask, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m4x2_m(vm, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m1x2_m(mask, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m1x2_m(vm, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m2x2_m(mask, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t vm, const 
int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m2x2_m(vm, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m4x2_m(mask, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m4x2_m(vm, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m1x2_m(mask, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m1x2_m(vm, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m2x2_m(mask, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m2x2_m(vm, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m4x2_m(mask, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m4x2_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg2e8.c b/auto-generated/gnu-api-tests/vlseg2e8.c index 9708ccb98..0714e74e0 100644 --- a/auto-generated/gnu-api-tests/vlseg2e8.c +++ b/auto-generated/gnu-api-tests/vlseg2e8.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2(const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf8x2(base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2(const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf8x2(rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2(const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf4x2(base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2(const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf4x2(rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2(const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf2x2(base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2(const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf2x2(rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2(const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m1x2(base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2(const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m1x2(rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2(const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m2x2(base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2(const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m2x2(rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2(const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m4x2(base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2(const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m4x2(rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2(const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf8x2(base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2(const uint8_t 
*rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf8x2(rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2(const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf4x2(base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf4x2(rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2(const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf2x2(base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf2x2(rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2(const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m1x2(base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m1x2(rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2(const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m2x2(base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m2x2(rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2(const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m4x2(base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m4x2(rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf8x2_m(mask, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf8x2_m(vm, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf4x2_m(mask, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf4x2_m(vm, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf2x2_m(mask, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf2x2_m(vm, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m1x2_m(mask, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m1x2_m(vm, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m2x2_m(mask, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m2x2_m(vm, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m4x2_m(mask, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m4x2_m(vm, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf8x2_m(mask, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf8x2_m(vm, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf4x2_m(mask, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf4x2_m(vm, rs1, vl); } -vuint8mf2x2_t 
test_vlseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf2x2_m(mask, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf2x2_m(vm, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m1x2_m(mask, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m1x2_m(vm, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m2x2_m(mask, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m2x2_m(vm, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m4x2_m(mask, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m4x2_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg2e8ff.c b/auto-generated/gnu-api-tests/vlseg2e8ff.c index 9f8cc2eb7..acc494bbd 100644 --- a/auto-generated/gnu-api-tests/vlseg2e8ff.c +++ b/auto-generated/gnu-api-tests/vlseg2e8ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf8x2(base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf8x2(rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf4x2(base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf4x2(rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf2x2(base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf2x2(rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m1x2(base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m1x2(rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m2x2(base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m2x2(rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m4x2(base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m4x2(rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf8x2(base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2(const uint8_t *rs1, size_t *new_vl, 
size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf8x2(rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf4x2(base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf4x2(rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf2x2(base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf2x2(rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m1x2(base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m1x2(rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m2x2(base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m2x2(rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m4x2(base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m4x2(rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf8x2_m(mask, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf8x2_m(vm, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf4x2_m(mask, base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf4x2_m(vm, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf2x2_m(mask, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf2x2_m(vm, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m1x2_m(mask, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m1x2_m(vm, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m2x2_m(mask, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m2x2_m(vm, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m4x2_m(mask, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m4x2_m(vm, rs1, new_vl, vl); } 
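(A minimal standalone sketch, not part of the generated patch, to make the renamed operands above concrete: the wrapper name demo_ff_load and the use of <riscv_vector.h> are assumptions for illustration, while the intrinsic and its signature are taken verbatim from the tests in this file. Under the new naming, vm is the mask operand and rs1 the base pointer, mirroring the ISA operand names.)

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical caller of the masked fault-only-first segment load tested
   above: loads two interleaved int8 fields from rs1 under mask vm; if an
   element past index 0 would fault, the load stops early and *new_vl
   reports how many elements were actually loaded. */
vint8m1x2_t demo_ff_load(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
  return __riscv_vlseg2e8ff_v_i8m1x2_m(vm, rs1, new_vl, vl);
}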
-vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf8x2_m(mask, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf8x2_m(vm, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf4x2_m(mask, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf4x2_m(vm, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf2x2_m(mask, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf2x2_m(vm, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m1x2_m(mask, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m1x2_m(vm, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m2x2_m(mask, base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m2x2_m(vm, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m4x2_m(mask, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m4x2_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e16.c b/auto-generated/gnu-api-tests/vlseg3e16.c index 0df5c5da2..91d823cce 100644 --- a/auto-generated/gnu-api-tests/vlseg3e16.c +++ b/auto-generated/gnu-api-tests/vlseg3e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3(const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf4x3(base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3(const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf4x3(rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3(const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf2x3(base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3(const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf2x3(rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3(const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m1x3(base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3(const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m1x3(rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3(const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m2x3(base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3(const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m2x3(rs1, vl); } -vint16mf4x3_t 
test_vlseg3e16_v_i16mf4x3(const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf4x3(base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3(const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf4x3(rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3(const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf2x3(base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3(const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf2x3(rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3(const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m1x3(base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3(const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m1x3(rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3(const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m2x3(base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3(const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m2x3(rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3(const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf4x3(base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf4x3(rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3(const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf2x3(base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf2x3(rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3(const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m1x3(base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m1x3(rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3(const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m2x3(base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m2x3(rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf4x3_m(mask, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf4x3_m(vm, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf2x3_m(mask, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf2x3_m(vm, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m1x3_m(mask, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m1x3_m(vm, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m2x3_m(mask, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m2x3_m(vm, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf4x3_m(mask, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf4x3_m(vm, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, size_t vl) { - 
return __riscv_vlseg3e16_v_i16mf2x3_m(mask, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf2x3_m(vm, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m1x3_m(mask, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m1x3_m(vm, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m2x3_m(mask, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m2x3_m(vm, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf4x3_m(mask, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf4x3_m(vm, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf2x3_m(mask, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf2x3_m(vm, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m1x3_m(mask, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m1x3_m(vm, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m2x3_m(mask, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m2x3_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e16ff.c b/auto-generated/gnu-api-tests/vlseg3e16ff.c index 875ef9ace..0d4fc9824 100644 --- a/auto-generated/gnu-api-tests/vlseg3e16ff.c +++ b/auto-generated/gnu-api-tests/vlseg3e16ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf4x3(base, new_vl, vl); +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf4x3(rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf2x3(base, new_vl, vl); +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf2x3(rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m1x3(base, new_vl, vl); +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m1x3(rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m2x3(base, new_vl, vl); +vfloat16m2x3_t 
test_vlseg3e16ff_v_f16m2x3(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m2x3(rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf4x3(base, new_vl, vl); +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf4x3(rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf2x3(base, new_vl, vl); +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf2x3(rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m1x3(base, new_vl, vl); +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m1x3(rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m2x3(base, new_vl, vl); +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m2x3(rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf4x3(base, new_vl, vl); +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf4x3(rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf2x3(base, new_vl, vl); +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf2x3(rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m1x3(base, new_vl, vl); +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m1x3(rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m2x3(base, new_vl, vl); +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m2x3(rs1, new_vl, vl); } -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf4x3_m(mask, base, new_vl, vl); +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf4x3_m(vm, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf2x3_m(mask, base, new_vl, vl); +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf2x3_m(vm, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m1x3_m(mask, base, new_vl, vl); +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, size_t 
*new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m1x3_m(vm, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m2x3_m(mask, base, new_vl, vl); +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m2x3_m(vm, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf4x3_m(mask, base, new_vl, vl); +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf4x3_m(vm, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf2x3_m(mask, base, new_vl, vl); +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf2x3_m(vm, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m1x3_m(mask, base, new_vl, vl); +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m1x3_m(vm, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m2x3_m(mask, base, new_vl, vl); +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m2x3_m(vm, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf4x3_m(mask, base, new_vl, vl); +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf4x3_m(vm, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf2x3_m(mask, base, new_vl, vl); +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf2x3_m(vm, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m1x3_m(mask, base, new_vl, vl); +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m1x3_m(vm, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m2x3_m(mask, base, new_vl, vl); +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m2x3_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e32.c b/auto-generated/gnu-api-tests/vlseg3e32.c index 473105354..62cf9c204 100644 --- 
a/auto-generated/gnu-api-tests/vlseg3e32.c +++ b/auto-generated/gnu-api-tests/vlseg3e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3(const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32mf2x3(base, vl); +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3(const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32mf2x3(rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3(const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m1x3(base, vl); +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3(const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m1x3(rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3(const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m2x3(base, vl); +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3(const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m2x3(rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3(const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32mf2x3(base, vl); +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3(const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32mf2x3(rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3(const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m1x3(base, vl); +vint32m1x3_t test_vlseg3e32_v_i32m1x3(const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m1x3(rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3(const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m2x3(base, vl); +vint32m2x3_t test_vlseg3e32_v_i32m2x3(const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m2x3(rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3(const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32mf2x3(base, vl); +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32mf2x3(rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3(const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m1x3(base, vl); +vuint32m1x3_t test_vlseg3e32_v_u32m1x3(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m1x3(rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3(const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m2x3(base, vl); +vuint32m2x3_t test_vlseg3e32_v_u32m2x3(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m2x3(rs1, vl); } -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32mf2x3_m(mask, base, vl); +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32mf2x3_m(vm, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m1x3_m(mask, base, vl); +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m1x3_m(vm, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m2x3_m(mask, base, vl); +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m2x3_m(vm, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32mf2x3_m(mask, base, vl); +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, size_t 
vl) { + return __riscv_vlseg3e32_v_i32mf2x3_m(vm, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m1x3_m(mask, base, vl); +vint32m1x3_t test_vlseg3e32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m1x3_m(vm, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m2x3_m(mask, base, vl); +vint32m2x3_t test_vlseg3e32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m2x3_m(vm, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32mf2x3_m(mask, base, vl); +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32mf2x3_m(vm, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m1x3_m(mask, base, vl); +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m1x3_m(vm, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m2x3_m(mask, base, vl); +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m2x3_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e32ff.c b/auto-generated/gnu-api-tests/vlseg3e32ff.c index 51bf7f462..d07eda5ce 100644 --- a/auto-generated/gnu-api-tests/vlseg3e32ff.c +++ b/auto-generated/gnu-api-tests/vlseg3e32ff.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_f32mf2x3(base, new_vl, vl); +vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_f32mf2x3(rs1, new_vl, vl); } -vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_f32m1x3(base, new_vl, vl); +vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_f32m1x3(rs1, new_vl, vl); } -vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_f32m2x3(base, new_vl, vl); +vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_f32m2x3(rs1, new_vl, vl); } -vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_i32mf2x3(base, new_vl, vl); +vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_i32mf2x3(rs1, new_vl, vl); } -vint32m1x3_t test_vlseg3e32ff_v_i32m1x3(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_i32m1x3(base, new_vl, vl); +vint32m1x3_t test_vlseg3e32ff_v_i32m1x3(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_i32m1x3(rs1, new_vl, vl); } -vint32m2x3_t 
test_vlseg3e32ff_v_i32m2x3(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_i32m2x3(base, new_vl, vl); +vint32m2x3_t test_vlseg3e32ff_v_i32m2x3(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_i32m2x3(rs1, new_vl, vl); } -vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_u32mf2x3(base, new_vl, vl); +vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_u32mf2x3(rs1, new_vl, vl); } -vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_u32m1x3(base, new_vl, vl); +vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_u32m1x3(rs1, new_vl, vl); } -vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_u32m2x3(base, new_vl, vl); +vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_u32m2x3(rs1, new_vl, vl); } -vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_f32mf2x3_m(mask, base, new_vl, vl); +vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_f32mf2x3_m(vm, rs1, new_vl, vl); } -vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_f32m1x3_m(mask, base, new_vl, vl); +vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_f32m1x3_m(vm, rs1, new_vl, vl); } -vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_f32m2x3_m(mask, base, new_vl, vl); +vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_f32m2x3_m(vm, rs1, new_vl, vl); } -vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_i32mf2x3_m(mask, base, new_vl, vl); +vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_i32mf2x3_m(vm, rs1, new_vl, vl); } -vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_i32m1x3_m(mask, base, new_vl, vl); +vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_i32m1x3_m(vm, rs1, new_vl, vl); } -vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_i32m2x3_m(mask, base, new_vl, vl); +vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_i32m2x3_m(vm, rs1, new_vl, vl); } -vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_u32mf2x3_m(mask, base, new_vl, vl); +vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t vm, const 
uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_u32mf2x3_m(vm, rs1, new_vl, vl); } -vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_u32m1x3_m(mask, base, new_vl, vl); +vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_u32m1x3_m(vm, rs1, new_vl, vl); } -vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e32ff_v_u32m2x3_m(mask, base, new_vl, vl); +vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e32ff_v_u32m2x3_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e64.c b/auto-generated/gnu-api-tests/vlseg3e64.c index eef170e37..8e1559793 100644 --- a/auto-generated/gnu-api-tests/vlseg3e64.c +++ b/auto-generated/gnu-api-tests/vlseg3e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x3_t test_vlseg3e64_v_f64m1x3(const float64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_f64m1x3(base, vl); +vfloat64m1x3_t test_vlseg3e64_v_f64m1x3(const float64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_f64m1x3(rs1, vl); } -vfloat64m2x3_t test_vlseg3e64_v_f64m2x3(const float64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_f64m2x3(base, vl); +vfloat64m2x3_t test_vlseg3e64_v_f64m2x3(const float64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_f64m2x3(rs1, vl); } -vint64m1x3_t test_vlseg3e64_v_i64m1x3(const int64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_i64m1x3(base, vl); +vint64m1x3_t test_vlseg3e64_v_i64m1x3(const int64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_i64m1x3(rs1, vl); } -vint64m2x3_t test_vlseg3e64_v_i64m2x3(const int64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_i64m2x3(base, vl); +vint64m2x3_t test_vlseg3e64_v_i64m2x3(const int64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_i64m2x3(rs1, vl); } -vuint64m1x3_t test_vlseg3e64_v_u64m1x3(const uint64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_u64m1x3(base, vl); +vuint64m1x3_t test_vlseg3e64_v_u64m1x3(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_u64m1x3(rs1, vl); } -vuint64m2x3_t test_vlseg3e64_v_u64m2x3(const uint64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_u64m2x3(base, vl); +vuint64m2x3_t test_vlseg3e64_v_u64m2x3(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_u64m2x3(rs1, vl); } -vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_f64m1x3_m(mask, base, vl); +vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_f64m1x3_m(vm, rs1, vl); } -vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_f64m2x3_m(mask, base, vl); +vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_f64m2x3_m(vm, rs1, vl); } -vint64m1x3_t test_vlseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_i64m1x3_m(mask, base, vl); +vint64m1x3_t 
test_vlseg3e64_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_i64m1x3_m(vm, rs1, vl); } -vint64m2x3_t test_vlseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_i64m2x3_m(mask, base, vl); +vint64m2x3_t test_vlseg3e64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_i64m2x3_m(vm, rs1, vl); } -vuint64m1x3_t test_vlseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_u64m1x3_m(mask, base, vl); +vuint64m1x3_t test_vlseg3e64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_u64m1x3_m(vm, rs1, vl); } -vuint64m2x3_t test_vlseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg3e64_v_u64m2x3_m(mask, base, vl); +vuint64m2x3_t test_vlseg3e64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg3e64_v_u64m2x3_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e64ff.c b/auto-generated/gnu-api-tests/vlseg3e64ff.c index d6b0e0e03..1aa30b4bb 100644 --- a/auto-generated/gnu-api-tests/vlseg3e64ff.c +++ b/auto-generated/gnu-api-tests/vlseg3e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_f64m1x3(base, new_vl, vl); +vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_f64m1x3(rs1, new_vl, vl); } -vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_f64m2x3(base, new_vl, vl); +vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_f64m2x3(rs1, new_vl, vl); } -vint64m1x3_t test_vlseg3e64ff_v_i64m1x3(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_i64m1x3(base, new_vl, vl); +vint64m1x3_t test_vlseg3e64ff_v_i64m1x3(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_i64m1x3(rs1, new_vl, vl); } -vint64m2x3_t test_vlseg3e64ff_v_i64m2x3(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_i64m2x3(base, new_vl, vl); +vint64m2x3_t test_vlseg3e64ff_v_i64m2x3(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_i64m2x3(rs1, new_vl, vl); } -vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_u64m1x3(base, new_vl, vl); +vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_u64m1x3(rs1, new_vl, vl); } -vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_u64m2x3(base, new_vl, vl); +vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_u64m2x3(rs1, new_vl, vl); } -vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_f64m1x3_m(mask, base, new_vl, vl); +vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t vm, const 
float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_f64m1x3_m(vm, rs1, new_vl, vl); } -vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_f64m2x3_m(mask, base, new_vl, vl); +vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_f64m2x3_m(vm, rs1, new_vl, vl); } -vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_i64m1x3_m(mask, base, new_vl, vl); +vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_i64m1x3_m(vm, rs1, new_vl, vl); } -vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_i64m2x3_m(mask, base, new_vl, vl); +vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_i64m2x3_m(vm, rs1, new_vl, vl); } -vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_u64m1x3_m(mask, base, new_vl, vl); +vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_u64m1x3_m(vm, rs1, new_vl, vl); } -vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e64ff_v_u64m2x3_m(mask, base, new_vl, vl); +vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e64ff_v_u64m2x3_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e8.c b/auto-generated/gnu-api-tests/vlseg3e8.c index 48fd9ea44..5758e8603 100644 --- a/auto-generated/gnu-api-tests/vlseg3e8.c +++ b/auto-generated/gnu-api-tests/vlseg3e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x3_t test_vlseg3e8_v_i8mf8x3(const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8mf8x3(base, vl); +vint8mf8x3_t test_vlseg3e8_v_i8mf8x3(const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8mf8x3(rs1, vl); } -vint8mf4x3_t test_vlseg3e8_v_i8mf4x3(const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8mf4x3(base, vl); +vint8mf4x3_t test_vlseg3e8_v_i8mf4x3(const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8mf4x3(rs1, vl); } -vint8mf2x3_t test_vlseg3e8_v_i8mf2x3(const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8mf2x3(base, vl); +vint8mf2x3_t test_vlseg3e8_v_i8mf2x3(const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8mf2x3(rs1, vl); } -vint8m1x3_t test_vlseg3e8_v_i8m1x3(const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8m1x3(base, vl); +vint8m1x3_t test_vlseg3e8_v_i8m1x3(const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8m1x3(rs1, vl); } -vint8m2x3_t test_vlseg3e8_v_i8m2x3(const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8m2x3(base, vl); +vint8m2x3_t test_vlseg3e8_v_i8m2x3(const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8m2x3(rs1, vl); } -vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3(const uint8_t 
*base, size_t vl) { - return __riscv_vlseg3e8_v_u8mf8x3(base, vl); +vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8mf8x3(rs1, vl); } -vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3(const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8mf4x3(base, vl); +vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8mf4x3(rs1, vl); } -vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3(const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8mf2x3(base, vl); +vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8mf2x3(rs1, vl); } -vuint8m1x3_t test_vlseg3e8_v_u8m1x3(const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8m1x3(base, vl); +vuint8m1x3_t test_vlseg3e8_v_u8m1x3(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8m1x3(rs1, vl); } -vuint8m2x3_t test_vlseg3e8_v_u8m2x3(const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8m2x3(base, vl); +vuint8m2x3_t test_vlseg3e8_v_u8m2x3(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8m2x3(rs1, vl); } -vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8mf8x3_m(mask, base, vl); +vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8mf8x3_m(vm, rs1, vl); } -vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8mf4x3_m(mask, base, vl); +vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8mf4x3_m(vm, rs1, vl); } -vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8mf2x3_m(mask, base, vl); +vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8mf2x3_m(vm, rs1, vl); } -vint8m1x3_t test_vlseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8m1x3_m(mask, base, vl); +vint8m1x3_t test_vlseg3e8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8m1x3_m(vm, rs1, vl); } -vint8m2x3_t test_vlseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_i8m2x3_m(mask, base, vl); +vint8m2x3_t test_vlseg3e8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_i8m2x3_m(vm, rs1, vl); } -vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8mf8x3_m(mask, base, vl); +vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8mf8x3_m(vm, rs1, vl); } -vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8mf4x3_m(mask, base, vl); +vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8mf4x3_m(vm, rs1, vl); } -vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8mf2x3_m(mask, base, vl); +vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8mf2x3_m(vm, rs1, vl); } -vuint8m1x3_t test_vlseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8m1x3_m(mask, base, vl); 
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8m1x3_m(vm, rs1, vl); } -vuint8m2x3_t test_vlseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_v_u8m2x3_m(mask, base, vl); +vuint8m2x3_t test_vlseg3e8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_v_u8m2x3_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg3e8ff.c b/auto-generated/gnu-api-tests/vlseg3e8ff.c index dfc7c0980..ed5d9e435 100644 --- a/auto-generated/gnu-api-tests/vlseg3e8ff.c +++ b/auto-generated/gnu-api-tests/vlseg3e8ff.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf8x3(base, new_vl, vl); +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf8x3(rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf4x3(base, new_vl, vl); +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf4x3(rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf2x3(base, new_vl, vl); +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf2x3(rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8m1x3(base, new_vl, vl); +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8m1x3(rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8m2x3(base, new_vl, vl); +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8m2x3(rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf8x3(base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf8x3(rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf4x3(base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf4x3(rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf2x3(base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf2x3(rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m1x3(base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m1x3(rs1, new_vl, vl); } -vuint8m2x3_t 
test_vlseg3e8ff_v_u8m2x3(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m2x3(base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m2x3(rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf8x3_m(mask, base, new_vl, vl); +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf8x3_m(vm, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf4x3_m(mask, base, new_vl, vl); +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf4x3_m(vm, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf2x3_m(mask, base, new_vl, vl); +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf2x3_m(vm, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8m1x3_m(mask, base, new_vl, vl); +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8m1x3_m(vm, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8m2x3_m(mask, base, new_vl, vl); +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8m2x3_m(vm, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf8x3_m(mask, base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf8x3_m(vm, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf4x3_m(mask, base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf4x3_m(vm, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf2x3_m(mask, base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf2x3_m(vm, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m1x3_m(mask, base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m1x3_m(vm, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m2x3_m(mask, base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_m(vbool4_t vm, 
const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m2x3_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8ff\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e16.c b/auto-generated/gnu-api-tests/vlseg4e16.c index bad3e2fa2..a4bca45a2 100644 --- a/auto-generated/gnu-api-tests/vlseg4e16.c +++ b/auto-generated/gnu-api-tests/vlseg4e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4(const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf4x4(base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4(const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf4x4(rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4(const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf2x4(base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4(const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf2x4(rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4(const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m1x4(base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4(const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m1x4(rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4(const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m2x4(base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4(const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m2x4(rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4(const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf4x4(base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4(const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf4x4(rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4(const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf2x4(base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4(const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf2x4(rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4(const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m1x4(base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4(const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m1x4(rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4(const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m2x4(base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4(const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m2x4(rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4(const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf4x4(base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf4x4(rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4(const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf2x4(base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf2x4(rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4(const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m1x4(base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m1x4(rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4(const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m2x4(base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4(const uint16_t *rs1, size_t vl) { + return 
__riscv_vlseg4e16_v_u16m2x4(rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf4x4_m(mask, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf4x4_m(vm, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf2x4_m(mask, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf2x4_m(vm, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m1x4_m(mask, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m1x4_m(vm, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m2x4_m(mask, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m2x4_m(vm, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf4x4_m(mask, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf4x4_m(vm, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf2x4_m(mask, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf2x4_m(vm, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m1x4_m(mask, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m1x4_m(vm, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m2x4_m(mask, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m2x4_m(vm, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf4x4_m(mask, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf4x4_m(vm, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf2x4_m(mask, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf2x4_m(vm, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m1x4_m(mask, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m1x4_m(vm, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m2x4_m(mask, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, size_t vl) { + return 
__riscv_vlseg4e16_v_u16m2x4_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e16ff.c b/auto-generated/gnu-api-tests/vlseg4e16ff.c index 6d3792403..c1c067e73 100644 --- a/auto-generated/gnu-api-tests/vlseg4e16ff.c +++ b/auto-generated/gnu-api-tests/vlseg4e16ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf4x4(base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf4x4(rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf2x4(base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf2x4(rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m1x4(base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m1x4(rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m2x4(base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m2x4(rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf4x4(base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf4x4(rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf2x4(base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf2x4(rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m1x4(base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m1x4(rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m2x4(base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m2x4(rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf4x4(base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf4x4(rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf2x4(base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg4e16ff_v_u16mf2x4(rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m1x4(base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m1x4(rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m2x4(base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m2x4(rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf4x4_m(mask, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf4x4_m(vm, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf2x4_m(mask, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf2x4_m(vm, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m1x4_m(mask, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m1x4_m(vm, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m2x4_m(mask, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m2x4_m(vm, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf4x4_m(mask, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf4x4_m(vm, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf2x4_m(mask, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf2x4_m(vm, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m1x4_m(mask, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m1x4_m(vm, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m2x4_m(mask, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m2x4_m(vm, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t mask, const uint16_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf4x4_m(mask, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf4x4_m(vm, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf2x4_m(mask, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf2x4_m(vm, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m1x4_m(mask, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m1x4_m(vm, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m2x4_m(mask, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m2x4_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e32.c b/auto-generated/gnu-api-tests/vlseg4e32.c index 2777f7ff8..f79b98b75 100644 --- a/auto-generated/gnu-api-tests/vlseg4e32.c +++ b/auto-generated/gnu-api-tests/vlseg4e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4(const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_f32mf2x4(base, vl); +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4(const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_f32mf2x4(rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4(const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_f32m1x4(base, vl); +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4(const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_f32m1x4(rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4(const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_f32m2x4(base, vl); +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4(const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_f32m2x4(rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4(const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_i32mf2x4(base, vl); +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4(const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_i32mf2x4(rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4(const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_i32m1x4(base, vl); +vint32m1x4_t test_vlseg4e32_v_i32m1x4(const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_i32m1x4(rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4(const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_i32m2x4(base, vl); +vint32m2x4_t test_vlseg4e32_v_i32m2x4(const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_i32m2x4(rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4(const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_u32mf2x4(base, vl); +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_u32mf2x4(rs1, vl); } 
-vuint32m1x4_t test_vlseg4e32_v_u32m1x4(const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_u32m1x4(base, vl); +vuint32m1x4_t test_vlseg4e32_v_u32m1x4(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_u32m1x4(rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4(const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_u32m2x4(base, vl); +vuint32m2x4_t test_vlseg4e32_v_u32m2x4(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_u32m2x4(rs1, vl); } -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_f32mf2x4_m(mask, base, vl); +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_f32mf2x4_m(vm, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_f32m1x4_m(mask, base, vl); +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_f32m1x4_m(vm, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_f32m2x4_m(mask, base, vl); +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_f32m2x4_m(vm, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_i32mf2x4_m(mask, base, vl); +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_i32mf2x4_m(vm, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_i32m1x4_m(mask, base, vl); +vint32m1x4_t test_vlseg4e32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_i32m1x4_m(vm, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_i32m2x4_m(mask, base, vl); +vint32m2x4_t test_vlseg4e32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_i32m2x4_m(vm, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_u32mf2x4_m(mask, base, vl); +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_u32mf2x4_m(vm, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_u32m1x4_m(mask, base, vl); +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_u32m1x4_m(vm, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_v_u32m2x4_m(mask, base, vl); +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_v_u32m2x4_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e32ff.c b/auto-generated/gnu-api-tests/vlseg4e32ff.c index 91a8b7f65..dad445840 100644 --- a/auto-generated/gnu-api-tests/vlseg4e32ff.c +++ b/auto-generated/gnu-api-tests/vlseg4e32ff.c @@ -6,76 
+6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32mf2x4(base, new_vl, vl); +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32mf2x4(rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32m1x4(base, new_vl, vl); +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m1x4(rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32m2x4(base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m2x4(rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32mf2x4(base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32mf2x4(rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m1x4(base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m1x4(rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m2x4(base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m2x4(rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32mf2x4(base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32mf2x4(rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m1x4(base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m1x4(rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m2x4(base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m2x4(rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32mf2x4_m(mask, base, new_vl, vl); +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32mf2x4_m(vm, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32m1x4_m(mask, base, new_vl, vl); +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m1x4_m(vm, rs1, new_vl, vl); } -vfloat32m2x4_t 
test_vlseg4e32ff_v_f32m2x4_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32m2x4_m(mask, base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m2x4_m(vm, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32mf2x4_m(mask, base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32mf2x4_m(vm, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m1x4_m(mask, base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m1x4_m(vm, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m2x4_m(mask, base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m2x4_m(vm, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32mf2x4_m(mask, base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32mf2x4_m(vm, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m1x4_m(mask, base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m1x4_m(vm, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m2x4_m(mask, base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m2x4_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e64.c b/auto-generated/gnu-api-tests/vlseg4e64.c index 1ac8032f7..8f2759318 100644 --- a/auto-generated/gnu-api-tests/vlseg4e64.c +++ b/auto-generated/gnu-api-tests/vlseg4e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4(const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m1x4(base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4(const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m1x4(rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4(const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m2x4(base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4(const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m2x4(rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4(const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m1x4(base, vl); +vint64m1x4_t 
test_vlseg4e64_v_i64m1x4(const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m1x4(rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4(const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m2x4(base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4(const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m2x4(rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4(const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m1x4(base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m1x4(rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4(const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m2x4(base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m2x4(rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m1x4_m(mask, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m1x4_m(vm, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m2x4_m(mask, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m2x4_m(vm, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m1x4_m(mask, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m1x4_m(vm, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m2x4_m(mask, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m2x4_m(vm, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m1x4_m(mask, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m1x4_m(vm, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m2x4_m(mask, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m2x4_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e64ff.c b/auto-generated/gnu-api-tests/vlseg4e64ff.c index b180a3684..a89bb8351 100644 --- a/auto-generated/gnu-api-tests/vlseg4e64ff.c +++ b/auto-generated/gnu-api-tests/vlseg4e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m1x4(base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m1x4(rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m2x4(base, new_vl, vl); 
+vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m2x4(rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m1x4(base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m1x4(rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m2x4(base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m2x4(rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m1x4(base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m1x4(rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m2x4(base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m2x4(rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m1x4_m(mask, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m1x4_m(vm, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m2x4_m(mask, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m2x4_m(vm, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m1x4_m(mask, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m1x4_m(vm, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m2x4_m(mask, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m2x4_m(vm, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m1x4_m(mask, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m1x4_m(vm, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m2x4_m(mask, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m2x4_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e8.c b/auto-generated/gnu-api-tests/vlseg4e8.c index 34f00f17b..568a965ee 100644 --- a/auto-generated/gnu-api-tests/vlseg4e8.c +++ b/auto-generated/gnu-api-tests/vlseg4e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4(const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf8x4(base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4(const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf8x4(rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4(const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf4x4(base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4(const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf4x4(rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4(const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf2x4(base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4(const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf2x4(rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4(const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m1x4(base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4(const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m1x4(rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4(const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m2x4(base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4(const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m2x4(rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4(const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf8x4(base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf8x4(rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4(const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf4x4(base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf4x4(rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4(const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf2x4(base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf2x4(rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4(const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m1x4(base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m1x4(rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4(const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m2x4(base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m2x4(rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf8x4_m(mask, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf8x4_m(vm, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf4x4_m(mask, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf4x4_m(vm, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf2x4_m(mask, base, vl); +vint8mf2x4_t 
test_vlseg4e8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf2x4_m(vm, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m1x4_m(mask, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m1x4_m(vm, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m2x4_m(mask, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m2x4_m(vm, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf8x4_m(mask, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf8x4_m(vm, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf4x4_m(mask, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf4x4_m(vm, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf2x4_m(mask, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf2x4_m(vm, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m1x4_m(mask, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m1x4_m(vm, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m2x4_m(mask, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m2x4_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg4e8ff.c b/auto-generated/gnu-api-tests/vlseg4e8ff.c index 9074a19a0..e5f5eb15e 100644 --- a/auto-generated/gnu-api-tests/vlseg4e8ff.c +++ b/auto-generated/gnu-api-tests/vlseg4e8ff.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf8x4(base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf8x4(rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf4x4(base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf4x4(rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf2x4(base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf2x4(rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4(const int8_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg4e8ff_v_i8m1x4(base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m1x4(rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m2x4(base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m2x4(rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf8x4(base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf8x4(rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf4x4(base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf4x4(rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf2x4(base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf2x4(rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m1x4(base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m1x4(rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m2x4(base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m2x4(rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf8x4_m(mask, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf8x4_m(vm, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf4x4_m(mask, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf4x4_m(vm, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf2x4_m(mask, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf2x4_m(vm, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m1x4_m(mask, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m1x4_m(vm, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m2x4_m(mask, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, 
size_t vl) { + return __riscv_vlseg4e8ff_v_i8m2x4_m(vm, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf8x4_m(mask, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf8x4_m(vm, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf4x4_m(mask, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf4x4_m(vm, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf2x4_m(mask, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf2x4_m(vm, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m1x4_m(mask, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m1x4_m(vm, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m2x4_m(mask, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m2x4_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8ff\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e16.c b/auto-generated/gnu-api-tests/vlseg5e16.c index 09178d1c4..577108803 100644 --- a/auto-generated/gnu-api-tests/vlseg5e16.c +++ b/auto-generated/gnu-api-tests/vlseg5e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5(const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf4x5(base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5(const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf4x5(rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5(const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf2x5(base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5(const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf2x5(rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5(const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16m1x5(base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5(const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16m1x5(rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5(const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf4x5(base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5(const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf4x5(rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5(const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf2x5(base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5(const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf2x5(rs1, vl); } -vint16m1x5_t 
test_vlseg5e16_v_i16m1x5(const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16m1x5(base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5(const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16m1x5(rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5(const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf4x5(base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf4x5(rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5(const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf2x5(base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf2x5(rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5(const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16m1x5(base, vl); +vuint16m1x5_t test_vlseg5e16_v_u16m1x5(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16m1x5(rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf4x5_m(mask, base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf4x5_m(vm, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf2x5_m(mask, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf2x5_m(vm, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16m1x5_m(mask, base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16m1x5_m(vm, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf4x5_m(mask, base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf4x5_m(vm, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf2x5_m(mask, base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf2x5_m(vm, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16m1x5_m(mask, base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16m1x5_m(vm, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf4x5_m(mask, base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf4x5_m(vm, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf2x5_m(mask, base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf2x5_m(vm, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16m1x5_m(mask, base, vl); +vuint16m1x5_t 
test_vlseg5e16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16m1x5_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e16ff.c b/auto-generated/gnu-api-tests/vlseg5e16ff.c index f94d100f0..d8c6cbe76 100644 --- a/auto-generated/gnu-api-tests/vlseg5e16ff.c +++ b/auto-generated/gnu-api-tests/vlseg5e16ff.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf4x5(base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf4x5(rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf2x5(base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf2x5(rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16m1x5(base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16m1x5(rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf4x5(base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf4x5(rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf2x5(base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf2x5(rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16m1x5(base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16m1x5(rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf4x5(base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf4x5(rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf2x5(base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf2x5(rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16m1x5(base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16m1x5(rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf4x5_m(mask, base, new_vl, vl); 
+vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf4x5_m(vm, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf2x5_m(mask, base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf2x5_m(vm, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16m1x5_m(mask, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16m1x5_m(vm, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf4x5_m(mask, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf4x5_m(vm, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf2x5_m(mask, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf2x5_m(vm, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16m1x5_m(mask, base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16m1x5_m(vm, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf4x5_m(mask, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf4x5_m(vm, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf2x5_m(mask, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf2x5_m(vm, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16m1x5_m(mask, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16m1x5_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e32.c b/auto-generated/gnu-api-tests/vlseg5e32.c index 494b7d27c..e1d2df1a7 100644 --- a/auto-generated/gnu-api-tests/vlseg5e32.c +++ b/auto-generated/gnu-api-tests/vlseg5e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5(const float32_t *base, 
size_t vl) { - return __riscv_vlseg5e32_v_f32mf2x5(base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5(const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32mf2x5(rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5(const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32m1x5(base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5(const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32m1x5(rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5(const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32mf2x5(base, vl); +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5(const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32mf2x5(rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5(const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32m1x5(base, vl); +vint32m1x5_t test_vlseg5e32_v_i32m1x5(const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32m1x5(rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5(const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32mf2x5(base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32mf2x5(rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5(const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32m1x5(base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32m1x5(rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32mf2x5_m(mask, base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32mf2x5_m(vm, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32m1x5_m(mask, base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32m1x5_m(vm, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32mf2x5_m(mask, base, vl); +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32mf2x5_m(vm, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32m1x5_m(mask, base, vl); +vint32m1x5_t test_vlseg5e32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32m1x5_m(vm, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32mf2x5_m(mask, base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32mf2x5_m(vm, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32m1x5_m(mask, base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32m1x5_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e32ff.c b/auto-generated/gnu-api-tests/vlseg5e32ff.c index 11a470b6a..a8e6cca62 100644 --- a/auto-generated/gnu-api-tests/vlseg5e32ff.c +++ 
b/auto-generated/gnu-api-tests/vlseg5e32ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32mf2x5(base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32mf2x5(rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32m1x5(base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32m1x5(rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32mf2x5(base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32mf2x5(rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32m1x5(base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32m1x5(rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32mf2x5(base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32mf2x5(rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32m1x5(base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32m1x5(rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32mf2x5_m(mask, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32mf2x5_m(vm, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32m1x5_m(mask, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32m1x5_m(vm, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32mf2x5_m(mask, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32mf2x5_m(vm, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32m1x5_m(mask, base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32m1x5_m(vm, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32mf2x5_m(mask, base, 
new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32mf2x5_m(vm, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32m1x5_m(mask, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32m1x5_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e64.c b/auto-generated/gnu-api-tests/vlseg5e64.c index 913ae84ac..96e7ac9cd 100644 --- a/auto-generated/gnu-api-tests/vlseg5e64.c +++ b/auto-generated/gnu-api-tests/vlseg5e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5(const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_f64m1x5(base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5(const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_f64m1x5(rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5(const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_i64m1x5(base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5(const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_i64m1x5(rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5(const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_u64m1x5(base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_u64m1x5(rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_f64m1x5_m(mask, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_f64m1x5_m(vm, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_i64m1x5_m(mask, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_i64m1x5_m(vm, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_u64m1x5_m(mask, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_u64m1x5_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e64ff.c b/auto-generated/gnu-api-tests/vlseg5e64ff.c index 92210ac1e..a64bf5fca 100644 --- a/auto-generated/gnu-api-tests/vlseg5e64ff.c +++ b/auto-generated/gnu-api-tests/vlseg5e64ff.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_f64m1x5(base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_f64m1x5(rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5(const int64_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg5e64ff_v_i64m1x5(base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_i64m1x5(rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_u64m1x5(base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_u64m1x5(rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_f64m1x5_m(mask, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_f64m1x5_m(vm, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_i64m1x5_m(mask, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_i64m1x5_m(vm, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_u64m1x5_m(mask, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_u64m1x5_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64ff\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e8.c b/auto-generated/gnu-api-tests/vlseg5e8.c index 7a011a918..bddcdcfaf 100644 --- a/auto-generated/gnu-api-tests/vlseg5e8.c +++ b/auto-generated/gnu-api-tests/vlseg5e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5(const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf8x5(base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5(const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf8x5(rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5(const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf4x5(base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5(const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf4x5(rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5(const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf2x5(base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5(const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf2x5(rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5(const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8m1x5(base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5(const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8m1x5(rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5(const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf8x5(base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf8x5(rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5(const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf4x5(base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf4x5(rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5(const uint8_t *base, size_t vl) { - return 
__riscv_vlseg5e8_v_u8mf2x5(base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf2x5(rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5(const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8m1x5(base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8m1x5(rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf8x5_m(mask, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf8x5_m(vm, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf4x5_m(mask, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf4x5_m(vm, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf2x5_m(mask, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf2x5_m(vm, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8m1x5_m(mask, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8m1x5_m(vm, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf8x5_m(mask, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf8x5_m(vm, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf4x5_m(mask, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf4x5_m(vm, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf2x5_m(mask, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf2x5_m(vm, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8m1x5_m(mask, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8m1x5_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg5e8ff.c b/auto-generated/gnu-api-tests/vlseg5e8ff.c index d72b251b8..caaf7ec90 100644 --- a/auto-generated/gnu-api-tests/vlseg5e8ff.c +++ b/auto-generated/gnu-api-tests/vlseg5e8ff.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf8x5(base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf8x5(rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5(const int8_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg5e8ff_v_i8mf4x5(base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf4x5(rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf2x5(base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf2x5(rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8m1x5(base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8m1x5(rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf8x5(base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf8x5(rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf4x5(base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf4x5(rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf2x5(base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf2x5(rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8m1x5(base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8m1x5(rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf8x5_m(mask, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf8x5_m(vm, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf4x5_m(mask, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf4x5_m(vm, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf2x5_m(mask, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf2x5_m(vm, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8m1x5_m(mask, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8m1x5_m(vm, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf8x5_m(mask, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, 
size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf8x5_m(vm, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf4x5_m(mask, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf4x5_m(vm, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf2x5_m(mask, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf2x5_m(vm, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8m1x5_m(mask, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8m1x5_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8ff\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg6e16.c b/auto-generated/gnu-api-tests/vlseg6e16.c index facc42708..45bb264e9 100644 --- a/auto-generated/gnu-api-tests/vlseg6e16.c +++ b/auto-generated/gnu-api-tests/vlseg6e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6(const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf4x6(base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6(const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf4x6(rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6(const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf2x6(base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6(const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf2x6(rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6(const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16m1x6(base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6(const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16m1x6(rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6(const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf4x6(base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6(const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf4x6(rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6(const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf2x6(base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6(const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf2x6(rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6(const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16m1x6(base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6(const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16m1x6(rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6(const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf4x6(base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf4x6(rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6(const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf2x6(base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6(const uint16_t *rs1, size_t vl) { + 
return __riscv_vlseg6e16_v_u16mf2x6(rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6(const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16m1x6(base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16m1x6(rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf4x6_m(mask, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf4x6_m(vm, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf2x6_m(mask, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf2x6_m(vm, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16m1x6_m(mask, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16m1x6_m(vm, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf4x6_m(mask, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf4x6_m(vm, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf2x6_m(mask, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf2x6_m(vm, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16m1x6_m(mask, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16m1x6_m(vm, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf4x6_m(mask, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf4x6_m(vm, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf2x6_m(mask, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf2x6_m(vm, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16m1x6_m(mask, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16m1x6_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg6e16ff.c b/auto-generated/gnu-api-tests/vlseg6e16ff.c index f3011dbc9..96b173038 100644 --- a/auto-generated/gnu-api-tests/vlseg6e16ff.c +++ b/auto-generated/gnu-api-tests/vlseg6e16ff.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6(const float16_t *base, size_t *new_vl, size_t 
vl) { - return __riscv_vlseg6e16ff_v_f16mf4x6(base, new_vl, vl); +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf4x6(rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf2x6(base, new_vl, vl); +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf2x6(rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16m1x6(base, new_vl, vl); +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16m1x6(rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf4x6(base, new_vl, vl); +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf4x6(rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf2x6(base, new_vl, vl); +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf2x6(rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16m1x6(base, new_vl, vl); +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16m1x6(rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf4x6(base, new_vl, vl); +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf4x6(rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf2x6(base, new_vl, vl); +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf2x6(rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16m1x6(base, new_vl, vl); +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16m1x6(rs1, new_vl, vl); } -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf4x6_m(mask, base, new_vl, vl); +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf4x6_m(vm, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf2x6_m(mask, base, new_vl, vl); +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf2x6_m(vm, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16m1x6_m(mask, base, 
new_vl, vl); +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16m1x6_m(vm, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf4x6_m(mask, base, new_vl, vl); +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf4x6_m(vm, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf2x6_m(mask, base, new_vl, vl); +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf2x6_m(vm, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16m1x6_m(mask, base, new_vl, vl); +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16m1x6_m(vm, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf4x6_m(mask, base, new_vl, vl); +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf4x6_m(vm, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf2x6_m(mask, base, new_vl, vl); +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf2x6_m(vm, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16m1x6_m(mask, base, new_vl, vl); +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16m1x6_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg6e32.c b/auto-generated/gnu-api-tests/vlseg6e32.c index b2896b7ad..c9bf34988 100644 --- a/auto-generated/gnu-api-tests/vlseg6e32.c +++ b/auto-generated/gnu-api-tests/vlseg6e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6(const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32mf2x6(base, vl); +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6(const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32mf2x6(rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6(const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32m1x6(base, vl); +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6(const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32m1x6(rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6(const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32mf2x6(base, vl); +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6(const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32mf2x6(rs1, vl); } -vint32m1x6_t 
test_vlseg6e32_v_i32m1x6(const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32m1x6(base, vl); +vint32m1x6_t test_vlseg6e32_v_i32m1x6(const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32m1x6(rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6(const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32mf2x6(base, vl); +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32mf2x6(rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6(const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32m1x6(base, vl); +vuint32m1x6_t test_vlseg6e32_v_u32m1x6(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32m1x6(rs1, vl); } -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32mf2x6_m(mask, base, vl); +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32mf2x6_m(vm, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32m1x6_m(mask, base, vl); +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32m1x6_m(vm, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32mf2x6_m(mask, base, vl); +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32mf2x6_m(vm, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32m1x6_m(mask, base, vl); +vint32m1x6_t test_vlseg6e32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32m1x6_m(vm, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32mf2x6_m(mask, base, vl); +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32mf2x6_m(vm, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32m1x6_m(mask, base, vl); +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32m1x6_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg6e32ff.c b/auto-generated/gnu-api-tests/vlseg6e32ff.c index d28d27ad0..dfd9bfe56 100644 --- a/auto-generated/gnu-api-tests/vlseg6e32ff.c +++ b/auto-generated/gnu-api-tests/vlseg6e32ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32mf2x6(base, new_vl, vl); +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32mf2x6(rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32m1x6(base, new_vl, vl); +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6(const float32_t *rs1, size_t *new_vl, size_t vl) { + 
return __riscv_vlseg6e32ff_v_f32m1x6(rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32mf2x6(base, new_vl, vl); +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32mf2x6(rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32m1x6(base, new_vl, vl); +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32m1x6(rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32mf2x6(base, new_vl, vl); +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32mf2x6(rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32m1x6(base, new_vl, vl); +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32m1x6(rs1, new_vl, vl); } -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32mf2x6_m(mask, base, new_vl, vl); +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32mf2x6_m(vm, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32m1x6_m(mask, base, new_vl, vl); +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32m1x6_m(vm, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32mf2x6_m(mask, base, new_vl, vl); +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32mf2x6_m(vm, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32m1x6_m(mask, base, new_vl, vl); +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32m1x6_m(vm, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32mf2x6_m(mask, base, new_vl, vl); +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32mf2x6_m(vm, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32m1x6_m(mask, base, new_vl, vl); +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32m1x6_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32ff\.[ivxfswum.]+\s+} 12 } } */ diff --git 
a/auto-generated/gnu-api-tests/vlseg6e64.c b/auto-generated/gnu-api-tests/vlseg6e64.c index 602064829..6d0009cd0 100644 --- a/auto-generated/gnu-api-tests/vlseg6e64.c +++ b/auto-generated/gnu-api-tests/vlseg6e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6(const float64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_f64m1x6(base, vl); +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6(const float64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_f64m1x6(rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6(const int64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_i64m1x6(base, vl); +vint64m1x6_t test_vlseg6e64_v_i64m1x6(const int64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_i64m1x6(rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6(const uint64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_u64m1x6(base, vl); +vuint64m1x6_t test_vlseg6e64_v_u64m1x6(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_u64m1x6(rs1, vl); } -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_f64m1x6_m(mask, base, vl); +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_f64m1x6_m(vm, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_i64m1x6_m(mask, base, vl); +vint64m1x6_t test_vlseg6e64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_i64m1x6_m(vm, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_u64m1x6_m(mask, base, vl); +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_u64m1x6_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg6e64ff.c b/auto-generated/gnu-api-tests/vlseg6e64ff.c index 0663f148f..e02886d00 100644 --- a/auto-generated/gnu-api-tests/vlseg6e64ff.c +++ b/auto-generated/gnu-api-tests/vlseg6e64ff.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_f64m1x6(base, new_vl, vl); +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_f64m1x6(rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_i64m1x6(base, new_vl, vl); +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_i64m1x6(rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_u64m1x6(base, new_vl, vl); +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_u64m1x6(rs1, new_vl, vl); } -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_f64m1x6_m(mask, base, new_vl, vl); +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t 
vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_f64m1x6_m(vm, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_i64m1x6_m(mask, base, new_vl, vl); +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_i64m1x6_m(vm, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_u64m1x6_m(mask, base, new_vl, vl); +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_u64m1x6_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64ff\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg6e8.c b/auto-generated/gnu-api-tests/vlseg6e8.c index 01a9b6b05..4bac5ecb3 100644 --- a/auto-generated/gnu-api-tests/vlseg6e8.c +++ b/auto-generated/gnu-api-tests/vlseg6e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6(const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf8x6(base, vl); +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6(const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf8x6(rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6(const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf4x6(base, vl); +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6(const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf4x6(rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6(const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf2x6(base, vl); +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6(const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf2x6(rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6(const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8m1x6(base, vl); +vint8m1x6_t test_vlseg6e8_v_i8m1x6(const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8m1x6(rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6(const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf8x6(base, vl); +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf8x6(rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6(const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf4x6(base, vl); +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf4x6(rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6(const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf2x6(base, vl); +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf2x6(rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6(const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8m1x6(base, vl); +vuint8m1x6_t test_vlseg6e8_v_u8m1x6(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8m1x6(rs1, vl); } -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf8x6_m(mask, base, vl); +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf8x6_m(vm, rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, size_t vl) { 
- return __riscv_vlseg6e8_v_i8mf4x6_m(mask, base, vl); +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf4x6_m(vm, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf2x6_m(mask, base, vl); +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf2x6_m(vm, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8m1x6_m(mask, base, vl); +vint8m1x6_t test_vlseg6e8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8m1x6_m(vm, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf8x6_m(mask, base, vl); +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf8x6_m(vm, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf4x6_m(mask, base, vl); +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf4x6_m(vm, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf2x6_m(mask, base, vl); +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf2x6_m(vm, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8m1x6_m(mask, base, vl); +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8m1x6_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg6e8ff.c b/auto-generated/gnu-api-tests/vlseg6e8ff.c index 3f9158d0e..6dd653082 100644 --- a/auto-generated/gnu-api-tests/vlseg6e8ff.c +++ b/auto-generated/gnu-api-tests/vlseg6e8ff.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf8x6(base, new_vl, vl); +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf8x6(rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf4x6(base, new_vl, vl); +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf4x6(rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf2x6(base, new_vl, vl); +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf2x6(rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8m1x6(base, new_vl, vl); +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8m1x6(rs1, new_vl, vl); } 
-vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf8x6(base, new_vl, vl); +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf8x6(rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf4x6(base, new_vl, vl); +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf4x6(rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf2x6(base, new_vl, vl); +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf2x6(rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8m1x6(base, new_vl, vl); +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8m1x6(rs1, new_vl, vl); } -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf8x6_m(mask, base, new_vl, vl); +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf8x6_m(vm, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf4x6_m(mask, base, new_vl, vl); +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf4x6_m(vm, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf2x6_m(mask, base, new_vl, vl); +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf2x6_m(vm, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8m1x6_m(mask, base, new_vl, vl); +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8m1x6_m(vm, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf8x6_m(mask, base, new_vl, vl); +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf8x6_m(vm, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf4x6_m(mask, base, new_vl, vl); +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf4x6_m(vm, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf2x6_m(mask, base, new_vl, vl); +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf2x6_m(vm, rs1, new_vl, 
vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8m1x6_m(mask, base, new_vl, vl); +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8m1x6_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8ff\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e16.c b/auto-generated/gnu-api-tests/vlseg7e16.c index 76bfefec9..59362e9f9 100644 --- a/auto-generated/gnu-api-tests/vlseg7e16.c +++ b/auto-generated/gnu-api-tests/vlseg7e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7(const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf4x7(base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7(const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf4x7(rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7(const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf2x7(base, vl); +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7(const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf2x7(rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7(const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16m1x7(base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7(const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16m1x7(rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7(const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf4x7(base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7(const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf4x7(rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7(const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf2x7(base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7(const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf2x7(rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7(const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16m1x7(base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7(const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16m1x7(rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7(const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf4x7(base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf4x7(rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7(const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf2x7(base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf2x7(rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7(const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16m1x7(base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16m1x7(rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf4x7_m(mask, base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf4x7_m(vm, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf2x7_m(mask, base, vl); +vfloat16mf2x7_t 
test_vlseg7e16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf2x7_m(vm, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16m1x7_m(mask, base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16m1x7_m(vm, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf4x7_m(mask, base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf4x7_m(vm, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf2x7_m(mask, base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf2x7_m(vm, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16m1x7_m(mask, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16m1x7_m(vm, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf4x7_m(mask, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf4x7_m(vm, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf2x7_m(mask, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf2x7_m(vm, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16m1x7_m(mask, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16m1x7_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e16ff.c b/auto-generated/gnu-api-tests/vlseg7e16ff.c index b196a05f3..8e1d90e3f 100644 --- a/auto-generated/gnu-api-tests/vlseg7e16ff.c +++ b/auto-generated/gnu-api-tests/vlseg7e16ff.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf4x7(base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf4x7(rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf2x7(base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf2x7(rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16m1x7(base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7(const float16_t *rs1, size_t *new_vl, 
size_t vl) { + return __riscv_vlseg7e16ff_v_f16m1x7(rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf4x7(base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf4x7(rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf2x7(base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf2x7(rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16m1x7(base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16m1x7(rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf4x7(base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf4x7(rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf2x7(base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf2x7(rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16m1x7(base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16m1x7(rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf4x7_m(mask, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf4x7_m(vm, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf2x7_m(mask, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf2x7_m(vm, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16m1x7_m(mask, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16m1x7_m(vm, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf4x7_m(mask, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf4x7_m(vm, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf2x7_m(mask, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t 
vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf2x7_m(vm, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16m1x7_m(mask, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16m1x7_m(vm, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf4x7_m(mask, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf4x7_m(vm, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf2x7_m(mask, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf2x7_m(vm, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16m1x7_m(mask, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16m1x7_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e32.c b/auto-generated/gnu-api-tests/vlseg7e32.c index 424a78ec1..b99b27cc5 100644 --- a/auto-generated/gnu-api-tests/vlseg7e32.c +++ b/auto-generated/gnu-api-tests/vlseg7e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7(const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32mf2x7(base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7(const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32mf2x7(rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7(const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32m1x7(base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7(const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32m1x7(rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7(const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32mf2x7(base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7(const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32mf2x7(rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7(const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32m1x7(base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7(const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32m1x7(rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7(const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32mf2x7(base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32mf2x7(rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7(const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32m1x7(base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32m1x7(rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_m(vbool64_t 
mask, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32mf2x7_m(mask, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32mf2x7_m(vm, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32m1x7_m(mask, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32m1x7_m(vm, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32mf2x7_m(mask, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32mf2x7_m(vm, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32m1x7_m(mask, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32m1x7_m(vm, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32mf2x7_m(mask, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32mf2x7_m(vm, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32m1x7_m(mask, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32m1x7_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e32ff.c b/auto-generated/gnu-api-tests/vlseg7e32ff.c index fff6638cd..1b16ab725 100644 --- a/auto-generated/gnu-api-tests/vlseg7e32ff.c +++ b/auto-generated/gnu-api-tests/vlseg7e32ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32mf2x7(base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32mf2x7(rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32m1x7(base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32m1x7(rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32mf2x7(base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32mf2x7(rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32m1x7(base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32m1x7(rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7(const uint32_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg7e32ff_v_u32mf2x7(base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32mf2x7(rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32m1x7(base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32m1x7(rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32mf2x7_m(mask, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32mf2x7_m(vm, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32m1x7_m(mask, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32m1x7_m(vm, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32mf2x7_m(mask, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32mf2x7_m(vm, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32m1x7_m(mask, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32m1x7_m(vm, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32mf2x7_m(mask, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32mf2x7_m(vm, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32m1x7_m(mask, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32m1x7_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e64.c b/auto-generated/gnu-api-tests/vlseg7e64.c index 9cd8e8628..90746f1d7 100644 --- a/auto-generated/gnu-api-tests/vlseg7e64.c +++ b/auto-generated/gnu-api-tests/vlseg7e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7(const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_f64m1x7(base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7(const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_f64m1x7(rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7(const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_i64m1x7(base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7(const int64_t *rs1, size_t vl) { + return 
__riscv_vlseg7e64_v_i64m1x7(rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7(const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_u64m1x7(base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_u64m1x7(rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_f64m1x7_m(mask, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_f64m1x7_m(vm, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_i64m1x7_m(mask, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_i64m1x7_m(vm, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_u64m1x7_m(mask, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_u64m1x7_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e64ff.c b/auto-generated/gnu-api-tests/vlseg7e64ff.c index 8cc12e8e1..d838187cb 100644 --- a/auto-generated/gnu-api-tests/vlseg7e64ff.c +++ b/auto-generated/gnu-api-tests/vlseg7e64ff.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_f64m1x7(base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_f64m1x7(rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_i64m1x7(base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_i64m1x7(rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_u64m1x7(base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_u64m1x7(rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_f64m1x7_m(mask, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_f64m1x7_m(vm, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_i64m1x7_m(mask, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_i64m1x7_m(vm, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_u64m1x7_m(mask, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg7e64ff_v_u64m1x7_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64ff\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e8.c b/auto-generated/gnu-api-tests/vlseg7e8.c index 3ed1bc322..4c07f9dab 100644 --- a/auto-generated/gnu-api-tests/vlseg7e8.c +++ b/auto-generated/gnu-api-tests/vlseg7e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7(const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf8x7(base, vl); +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7(const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf8x7(rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7(const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf4x7(base, vl); +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7(const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf4x7(rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7(const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf2x7(base, vl); +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7(const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf2x7(rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7(const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8m1x7(base, vl); +vint8m1x7_t test_vlseg7e8_v_i8m1x7(const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8m1x7(rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7(const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf8x7(base, vl); +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf8x7(rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7(const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf4x7(base, vl); +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf4x7(rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7(const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf2x7(base, vl); +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf2x7(rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7(const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8m1x7(base, vl); +vuint8m1x7_t test_vlseg7e8_v_u8m1x7(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8m1x7(rs1, vl); } -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf8x7_m(mask, base, vl); +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf8x7_m(vm, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf4x7_m(mask, base, vl); +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf4x7_m(vm, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf2x7_m(mask, base, vl); +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf2x7_m(vm, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8m1x7_m(mask, base, vl); +vint8m1x7_t test_vlseg7e8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8m1x7_m(vm, rs1, vl); } 
-vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf8x7_m(mask, base, vl); +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf8x7_m(vm, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf4x7_m(mask, base, vl); +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf4x7_m(vm, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf2x7_m(mask, base, vl); +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf2x7_m(vm, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8m1x7_m(mask, base, vl); +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8m1x7_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg7e8ff.c b/auto-generated/gnu-api-tests/vlseg7e8ff.c index e05914feb..cf174656d 100644 --- a/auto-generated/gnu-api-tests/vlseg7e8ff.c +++ b/auto-generated/gnu-api-tests/vlseg7e8ff.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf8x7(base, new_vl, vl); +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf8x7(rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf4x7(base, new_vl, vl); +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf4x7(rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf2x7(base, new_vl, vl); +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf2x7(rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8m1x7(base, new_vl, vl); +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8m1x7(rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf8x7(base, new_vl, vl); +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf8x7(rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf4x7(base, new_vl, vl); +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf4x7(rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf2x7(base, new_vl, vl); +vuint8mf2x7_t 
test_vlseg7e8ff_v_u8mf2x7(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf2x7(rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8m1x7(base, new_vl, vl); +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8m1x7(rs1, new_vl, vl); } -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf8x7_m(mask, base, new_vl, vl); +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf8x7_m(vm, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf4x7_m(mask, base, new_vl, vl); +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf4x7_m(vm, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf2x7_m(mask, base, new_vl, vl); +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf2x7_m(vm, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8m1x7_m(mask, base, new_vl, vl); +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8m1x7_m(vm, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf8x7_m(mask, base, new_vl, vl); +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf8x7_m(vm, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf4x7_m(mask, base, new_vl, vl); +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf4x7_m(vm, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf2x7_m(mask, base, new_vl, vl); +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf2x7_m(vm, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8m1x7_m(mask, base, new_vl, vl); +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8m1x7_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8ff\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e16.c b/auto-generated/gnu-api-tests/vlseg8e16.c index 2640cb3ae..f81e04b6a 100644 --- a/auto-generated/gnu-api-tests/vlseg8e16.c +++ b/auto-generated/gnu-api-tests/vlseg8e16.c @@ -6,76 +6,76 @@ typedef 
_Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8(const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf4x8(base, vl); +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8(const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf4x8(rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8(const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf2x8(base, vl); +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8(const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf2x8(rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8(const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16m1x8(base, vl); +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8(const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16m1x8(rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8(const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf4x8(base, vl); +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8(const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf4x8(rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8(const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf2x8(base, vl); +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8(const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf2x8(rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8(const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16m1x8(base, vl); +vint16m1x8_t test_vlseg8e16_v_i16m1x8(const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16m1x8(rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8(const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf4x8(base, vl); +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf4x8(rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8(const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf2x8(base, vl); +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf2x8(rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8(const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16m1x8(base, vl); +vuint16m1x8_t test_vlseg8e16_v_u16m1x8(const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16m1x8(rs1, vl); } -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf4x8_m(mask, base, vl); +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf4x8_m(vm, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf2x8_m(mask, base, vl); +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf2x8_m(vm, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16m1x8_m(mask, base, vl); +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16m1x8_m(vm, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf4x8_m(mask, base, vl); +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf4x8_m(vm, rs1, vl); } -vint16mf2x8_t 
test_vlseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf2x8_m(mask, base, vl); +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf2x8_m(vm, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16m1x8_m(mask, base, vl); +vint16m1x8_t test_vlseg8e16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16m1x8_m(vm, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf4x8_m(mask, base, vl); +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf4x8_m(vm, rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf2x8_m(mask, base, vl); +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf2x8_m(vm, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16m1x8_m(mask, base, vl); +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16m1x8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e16ff.c b/auto-generated/gnu-api-tests/vlseg8e16ff.c index a0aa2733a..3969eacd4 100644 --- a/auto-generated/gnu-api-tests/vlseg8e16ff.c +++ b/auto-generated/gnu-api-tests/vlseg8e16ff.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf4x8(base, new_vl, vl); +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf4x8(rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf2x8(base, new_vl, vl); +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf2x8(rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8(const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16m1x8(base, new_vl, vl); +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8(const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16m1x8(rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf4x8(base, new_vl, vl); +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf4x8(rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8(const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf2x8(base, new_vl, vl); +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf2x8(rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8(const int16_t *base, 
size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16m1x8(base, new_vl, vl); +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8(const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16m1x8(rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf4x8(base, new_vl, vl); +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf4x8(rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf2x8(base, new_vl, vl); +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf2x8(rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8(const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16m1x8(base, new_vl, vl); +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8(const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16m1x8(rs1, new_vl, vl); } -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf4x8_m(mask, base, new_vl, vl); +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf4x8_m(vm, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf2x8_m(mask, base, new_vl, vl); +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf2x8_m(vm, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16m1x8_m(mask, base, new_vl, vl); +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16m1x8_m(vm, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf4x8_m(mask, base, new_vl, vl); +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf4x8_m(vm, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf2x8_m(mask, base, new_vl, vl); +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf2x8_m(vm, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16m1x8_m(mask, base, new_vl, vl); +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16m1x8_m(vm, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf4x8_m(mask, base, new_vl, vl); +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, 
size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf4x8_m(vm, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf2x8_m(mask, base, new_vl, vl); +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf2x8_m(vm, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16m1x8_m(mask, base, new_vl, vl); +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16m1x8_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e32.c b/auto-generated/gnu-api-tests/vlseg8e32.c index fbe955efc..ec4150f74 100644 --- a/auto-generated/gnu-api-tests/vlseg8e32.c +++ b/auto-generated/gnu-api-tests/vlseg8e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8(const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32mf2x8(base, vl); +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8(const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32mf2x8(rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8(const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32m1x8(base, vl); +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8(const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32m1x8(rs1, vl); } -vint32mf2x8_t test_vlseg8e32_v_i32mf2x8(const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32mf2x8(base, vl); +vint32mf2x8_t test_vlseg8e32_v_i32mf2x8(const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32mf2x8(rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8(const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32m1x8(base, vl); +vint32m1x8_t test_vlseg8e32_v_i32m1x8(const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32m1x8(rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8(const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32mf2x8(base, vl); +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32mf2x8(rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8(const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32m1x8(base, vl); +vuint32m1x8_t test_vlseg8e32_v_u32m1x8(const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32m1x8(rs1, vl); } -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32mf2x8_m(mask, base, vl); +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32mf2x8_m(vm, rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32m1x8_m(mask, base, vl); +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32m1x8_m(vm, rs1, vl); } -vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32mf2x8_m(mask, base, vl); +vint32mf2x8_t 
test_vlseg8e32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32mf2x8_m(vm, rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32m1x8_m(mask, base, vl); +vint32m1x8_t test_vlseg8e32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32m1x8_m(vm, rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32mf2x8_m(mask, base, vl); +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32mf2x8_m(vm, rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32m1x8_m(mask, base, vl); +vuint32m1x8_t test_vlseg8e32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32m1x8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e32ff.c b/auto-generated/gnu-api-tests/vlseg8e32ff.c index 537f65b1c..4be08c572 100644 --- a/auto-generated/gnu-api-tests/vlseg8e32ff.c +++ b/auto-generated/gnu-api-tests/vlseg8e32ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_f32mf2x8(base, new_vl, vl); +vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_f32mf2x8(rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8(const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_f32m1x8(base, new_vl, vl); +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8(const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_f32m1x8(rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_i32mf2x8(base, new_vl, vl); +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_i32mf2x8(rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8(const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_i32m1x8(base, new_vl, vl); +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8(const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_i32m1x8(rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_u32mf2x8(base, new_vl, vl); +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_u32mf2x8(rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8(const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_u32m1x8(base, new_vl, vl); +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8(const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_u32m1x8(rs1, new_vl, vl); } -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_f32mf2x8_m(mask, base, new_vl, vl); +vfloat32mf2x8_t 
test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_f32mf2x8_m(vm, rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_f32m1x8_m(mask, base, new_vl, vl); +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_f32m1x8_m(vm, rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_i32mf2x8_m(mask, base, new_vl, vl); +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_i32mf2x8_m(vm, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_i32m1x8_m(mask, base, new_vl, vl); +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_i32m1x8_m(vm, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_u32mf2x8_m(mask, base, new_vl, vl); +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_u32mf2x8_m(vm, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_v_u32m1x8_m(mask, base, new_vl, vl); +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_v_u32m1x8_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e64.c b/auto-generated/gnu-api-tests/vlseg8e64.c index 30f6079ae..5e621e9de 100644 --- a/auto-generated/gnu-api-tests/vlseg8e64.c +++ b/auto-generated/gnu-api-tests/vlseg8e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8(const float64_t *base, size_t vl) { - return __riscv_vlseg8e64_v_f64m1x8(base, vl); +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8(const float64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_v_f64m1x8(rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8(const int64_t *base, size_t vl) { - return __riscv_vlseg8e64_v_i64m1x8(base, vl); +vint64m1x8_t test_vlseg8e64_v_i64m1x8(const int64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_v_i64m1x8(rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8(const uint64_t *base, size_t vl) { - return __riscv_vlseg8e64_v_u64m1x8(base, vl); +vuint64m1x8_t test_vlseg8e64_v_u64m1x8(const uint64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_v_u64m1x8(rs1, vl); } -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vlseg8e64_v_f64m1x8_m(mask, base, vl); +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_v_f64m1x8_m(vm, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, size_t vl) { - return 
__riscv_vlseg8e64_v_i64m1x8_m(mask, base, vl); +vint64m1x8_t test_vlseg8e64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_v_i64m1x8_m(vm, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vlseg8e64_v_u64m1x8_m(mask, base, vl); +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_v_u64m1x8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e64ff.c b/auto-generated/gnu-api-tests/vlseg8e64ff.c index a7b6d0a79..7edc30b1e 100644 --- a/auto-generated/gnu-api-tests/vlseg8e64ff.c +++ b/auto-generated/gnu-api-tests/vlseg8e64ff.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8(const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_v_f64m1x8(base, new_vl, vl); +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8(const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_v_f64m1x8(rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8(const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_v_i64m1x8(base, new_vl, vl); +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8(const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_v_i64m1x8(rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8(const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_v_u64m1x8(base, new_vl, vl); +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8(const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_v_u64m1x8(rs1, new_vl, vl); } -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_v_f64m1x8_m(mask, base, new_vl, vl); +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_v_f64m1x8_m(vm, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_v_i64m1x8_m(mask, base, new_vl, vl); +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_v_i64m1x8_m(vm, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_v_u64m1x8_m(mask, base, new_vl, vl); +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_v_u64m1x8_m(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64ff\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e8.c b/auto-generated/gnu-api-tests/vlseg8e8.c index 4a33774c4..48acf7b8f 100644 --- a/auto-generated/gnu-api-tests/vlseg8e8.c +++ b/auto-generated/gnu-api-tests/vlseg8e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8(const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8mf8x8(base, vl); +vint8mf8x8_t 
test_vlseg8e8_v_i8mf8x8(const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8mf8x8(rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8(const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8mf4x8(base, vl); +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8(const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8mf4x8(rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8(const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8mf2x8(base, vl); +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8(const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8mf2x8(rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8(const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8m1x8(base, vl); +vint8m1x8_t test_vlseg8e8_v_i8m1x8(const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8m1x8(rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8(const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_u8mf8x8(base, vl); +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8mf8x8(rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8(const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_u8mf4x8(base, vl); +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8mf4x8(rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8(const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_u8mf2x8(base, vl); +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8mf2x8(rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8(const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_u8m1x8(base, vl); +vuint8m1x8_t test_vlseg8e8_v_u8m1x8(const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8m1x8(rs1, vl); } -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8mf8x8_m(mask, base, vl); +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8mf8x8_m(vm, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8mf4x8_m(mask, base, vl); +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8mf4x8_m(vm, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8mf2x8_m(mask, base, vl); +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8mf2x8_m(vm, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_i8m1x8_m(mask, base, vl); +vint8m1x8_t test_vlseg8e8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_i8m1x8_m(vm, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_u8mf8x8_m(mask, base, vl); +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8mf8x8_m(vm, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_u8mf4x8_m(mask, base, vl); +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8mf4x8_m(vm, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, size_t vl) { 
- return __riscv_vlseg8e8_v_u8mf2x8_m(mask, base, vl); +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8mf2x8_m(vm, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_v_u8m1x8_m(mask, base, vl); +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_v_u8m1x8_m(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlseg8e8ff.c b/auto-generated/gnu-api-tests/vlseg8e8ff.c index 59ec4c98f..a0151b970 100644 --- a/auto-generated/gnu-api-tests/vlseg8e8ff.c +++ b/auto-generated/gnu-api-tests/vlseg8e8ff.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_i8mf8x8(base, new_vl, vl); +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_i8mf8x8(rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_i8mf4x8(base, new_vl, vl); +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_i8mf4x8(rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_i8mf2x8(base, new_vl, vl); +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_i8mf2x8(rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8(const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_i8m1x8(base, new_vl, vl); +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8(const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_i8m1x8(rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_u8mf8x8(base, new_vl, vl); +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_u8mf8x8(rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_u8mf4x8(base, new_vl, vl); +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_u8mf4x8(rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_u8mf2x8(base, new_vl, vl); +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_u8mf2x8(rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8(const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_u8m1x8(base, new_vl, vl); +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8(const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_v_u8m1x8(rs1, new_vl, vl); } -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_v_i8mf8x8_m(mask, base, new_vl, vl); +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, size_t 
*new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_i8mf8x8_m(vm, rs1, new_vl, vl);
 }

-vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return __riscv_vlseg8e8ff_v_i8mf4x8_m(mask, base, new_vl, vl);
+vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_i8mf4x8_m(vm, rs1, new_vl, vl);
 }

-vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return __riscv_vlseg8e8ff_v_i8mf2x8_m(mask, base, new_vl, vl);
+vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_i8mf2x8_m(vm, rs1, new_vl, vl);
 }

-vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
- return __riscv_vlseg8e8ff_v_i8m1x8_m(mask, base, new_vl, vl);
+vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_i8m1x8_m(vm, rs1, new_vl, vl);
 }

-vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return __riscv_vlseg8e8ff_v_u8mf8x8_m(mask, base, new_vl, vl);
+vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_u8mf8x8_m(vm, rs1, new_vl, vl);
 }

-vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return __riscv_vlseg8e8ff_v_u8mf4x8_m(mask, base, new_vl, vl);
+vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_u8mf4x8_m(vm, rs1, new_vl, vl);
 }

-vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return __riscv_vlseg8e8ff_v_u8mf2x8_m(mask, base, new_vl, vl);
+vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_u8mf2x8_m(vm, rs1, new_vl, vl);
 }

-vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
- return __riscv_vlseg8e8ff_v_u8m1x8_m(mask, base, new_vl, vl);
+vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e8ff_v_u8m1x8_m(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8ff\.[ivxfswum.]+\s+} 16 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg2e16.c b/auto-generated/gnu-api-tests/vlsseg2e16.c
index db7e65739..8684ee481 100644
--- a/auto-generated/gnu-api-tests/vlsseg2e16.c
+++ b/auto-generated/gnu-api-tests/vlsseg2e16.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf4x2(base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf4x2(rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf2x2(base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf2x2(rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m1x2(base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m1x2(rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m2x2(base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m2x2(rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m4x2(base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m4x2(rs1, rs2, vl);
 }

-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf4x2(base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf4x2(rs1, rs2, vl);
 }

-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf2x2(base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf2x2(rs1, rs2, vl);
 }

-vint16m1x2_t test_vlsseg2e16_v_i16m1x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m1x2(base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m1x2(rs1, rs2, vl);
 }

-vint16m2x2_t test_vlsseg2e16_v_i16m2x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m2x2(base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m2x2(rs1, rs2, vl);
 }

-vint16m4x2_t test_vlsseg2e16_v_i16m4x2(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m4x2(base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m4x2(rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf4x2(base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf4x2(rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf2x2(base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf2x2(rs1, rs2, vl);
 }

-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m1x2(base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m1x2(rs1, rs2, vl);
 }

-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m2x2(base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m2x2(rs1, rs2, vl);
 }

-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m4x2(base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m4x2(rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf4x2_m(mask, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf4x2_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16mf2x2_m(mask, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16mf2x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m1x2_m(mask, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m2x2_m(mask, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_m(vbool4_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_f16m4x2_m(mask, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_f16m4x2_m(vm, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf4x2_m(mask, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf4x2_m(vm, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16mf2x2_m(mask, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16mf2x2_m(vm, rs1, rs2, vl);
 }

-vint16m1x2_t test_vlsseg2e16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m1x2_m(mask, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m1x2_m(vm, rs1, rs2, vl);
 }

-vint16m2x2_t test_vlsseg2e16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m2x2_m(mask, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m2x2_m(vm, rs1, rs2, vl);
 }

-vint16m4x2_t test_vlsseg2e16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_i16m4x2_m(mask, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_i16m4x2_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf4x2_m(mask, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf4x2_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16mf2x2_m(mask, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m1x2_m(mask, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m1x2_m(vm, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m2x2_m(mask, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m2x2_m(vm, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e16_v_u16m4x2_m(mask, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_v_u16m4x2_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e16\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg2e32.c b/auto-generated/gnu-api-tests/vlsseg2e32.c
index 00ddab720..95c334a59 100644
--- a/auto-generated/gnu-api-tests/vlsseg2e32.c
+++ b/auto-generated/gnu-api-tests/vlsseg2e32.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32mf2x2(base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32mf2x2(rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32m1x2(base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32m1x2(rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32m2x2(base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32m2x2(rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32m4x2(base, bstride, vl);
+vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32m4x2(rs1, rs2, vl);
 }

-vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32mf2x2(base, bstride, vl);
+vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32mf2x2(rs1, rs2, vl);
 }

-vint32m1x2_t test_vlsseg2e32_v_i32m1x2(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32m1x2(base, bstride, vl);
+vint32m1x2_t test_vlsseg2e32_v_i32m1x2(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32m1x2(rs1, rs2, vl);
 }

-vint32m2x2_t test_vlsseg2e32_v_i32m2x2(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32m2x2(base, bstride, vl);
+vint32m2x2_t test_vlsseg2e32_v_i32m2x2(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32m2x2(rs1, rs2, vl);
 }

-vint32m4x2_t test_vlsseg2e32_v_i32m4x2(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32m4x2(base, bstride, vl);
+vint32m4x2_t test_vlsseg2e32_v_i32m4x2(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32m4x2(rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32mf2x2(base, bstride, vl);
+vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32mf2x2(rs1, rs2, vl);
 }

-vuint32m1x2_t test_vlsseg2e32_v_u32m1x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32m1x2(base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32m1x2(rs1, rs2, vl);
 }

-vuint32m2x2_t test_vlsseg2e32_v_u32m2x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32m2x2(base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32m2x2(rs1, rs2, vl);
 }

-vuint32m4x2_t test_vlsseg2e32_v_u32m4x2(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32m4x2(base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32m4x2(rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32mf2x2_m(mask, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32mf2x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32m1x2_m(mask, base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32m2x2_m(mask, base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_f32m4x2_m(mask, base, bstride, vl);
+vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_f32m4x2_m(vm, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32mf2x2_m(mask, base, bstride, vl);
+vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32mf2x2_m(vm, rs1, rs2, vl);
 }

-vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32m1x2_m(mask, base, bstride, vl);
+vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32m1x2_m(vm, rs1, rs2, vl);
 }

-vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32m2x2_m(mask, base, bstride, vl);
+vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32m2x2_m(vm, rs1, rs2, vl);
 }

-vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_i32m4x2_m(mask, base, bstride, vl);
+vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_i32m4x2_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32mf2x2_m(mask, base, bstride, vl);
+vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32m1x2_m(mask, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32m1x2_m(vm, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32m2x2_m(mask, base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32m2x2_m(vm, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e32_v_u32m4x2_m(mask, base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e32_v_u32m4x2_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg2e64.c b/auto-generated/gnu-api-tests/vlsseg2e64.c
index b3733a336..641a4bfcb 100644
--- a/auto-generated/gnu-api-tests/vlsseg2e64.c
+++ b/auto-generated/gnu-api-tests/vlsseg2e64.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2(const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_f64m1x2(base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_f64m1x2(rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2(const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_f64m2x2(base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_f64m2x2(rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2(const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_f64m4x2(base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_f64m4x2(rs1, rs2, vl);
 }

-vint64m1x2_t test_vlsseg2e64_v_i64m1x2(const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_i64m1x2(base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_i64m1x2(rs1, rs2, vl);
 }

-vint64m2x2_t test_vlsseg2e64_v_i64m2x2(const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_i64m2x2(base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_i64m2x2(rs1, rs2, vl);
 }

-vint64m4x2_t test_vlsseg2e64_v_i64m4x2(const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_i64m4x2(base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_i64m4x2(rs1, rs2, vl);
 }

-vuint64m1x2_t test_vlsseg2e64_v_u64m1x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_u64m1x2(base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_u64m1x2(rs1, rs2, vl);
 }

-vuint64m2x2_t test_vlsseg2e64_v_u64m2x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_u64m2x2(base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_u64m2x2(rs1, rs2, vl);
 }

-vuint64m4x2_t test_vlsseg2e64_v_u64m4x2(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_u64m4x2(base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_u64m4x2(rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_f64m1x2_m(mask, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_f64m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_f64m2x2_m(mask, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_f64m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_f64m4x2_m(mask, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_f64m4x2_m(vm, rs1, rs2, vl);
 }

-vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_i64m1x2_m(mask, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_i64m1x2_m(vm, rs1, rs2, vl);
 }

-vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_i64m2x2_m(mask, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_i64m2x2_m(vm, rs1, rs2, vl);
 }

-vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_i64m4x2_m(mask, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_i64m4x2_m(vm, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_u64m1x2_m(mask, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_u64m1x2_m(vm, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_u64m2x2_m(mask, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_u64m2x2_m(vm, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e64_v_u64m4x2_m(mask, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e64_v_u64m4x2_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e64\.[ivxfswum.]+\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg2e8.c b/auto-generated/gnu-api-tests/vlsseg2e8.c
index 60fd58da4..e8fb38ce3 100644
--- a/auto-generated/gnu-api-tests/vlsseg2e8.c
+++ b/auto-generated/gnu-api-tests/vlsseg2e8.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf8x2(base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf8x2(rs1, rs2, vl);
 }

-vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf4x2(base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf4x2(rs1, rs2, vl);
 }

-vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf2x2(base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf2x2(rs1, rs2, vl);
 }

-vint8m1x2_t test_vlsseg2e8_v_i8m1x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m1x2(base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m1x2(rs1, rs2, vl);
 }

-vint8m2x2_t test_vlsseg2e8_v_i8m2x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m2x2(base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m2x2(rs1, rs2, vl);
 }

-vint8m4x2_t test_vlsseg2e8_v_i8m4x2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m4x2(base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m4x2(rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf8x2(base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf8x2(rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf4x2(base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf4x2(rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf2x2(base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf2x2(rs1, rs2, vl);
 }

-vuint8m1x2_t test_vlsseg2e8_v_u8m1x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m1x2(base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m1x2(rs1, rs2, vl);
 }

-vuint8m2x2_t test_vlsseg2e8_v_u8m2x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m2x2(base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m2x2(rs1, rs2, vl);
 }

-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m4x2(base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m4x2(rs1, rs2, vl);
 }

-vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf8x2_m(mask, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf8x2_m(vm, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf4x2_m(mask, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf4x2_m(vm, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8mf2x2_m(mask, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8mf2x2_m(vm, rs1, rs2, vl);
 }

-vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m1x2_m(mask, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m1x2_m(vm, rs1, rs2, vl);
 }

-vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m2x2_m(mask, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m2x2_m(vm, rs1, rs2, vl);
 }

-vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_i8m4x2_m(mask, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_i8m4x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf8x2_m(mask, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf8x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf4x2_m(mask, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf4x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8mf2x2_m(mask, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m1x2_m(mask, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m1x2_m(vm, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m2x2_m(mask, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m2x2_m(vm, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg2e8_v_u8m4x2_m(mask, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e8_v_u8m4x2_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e8\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg3e16.c b/auto-generated/gnu-api-tests/vlsseg3e16.c
index 6c8662631..f1b5c569b 100644
--- a/auto-generated/gnu-api-tests/vlsseg3e16.c
+++ b/auto-generated/gnu-api-tests/vlsseg3e16.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16mf4x3(base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16mf4x3(rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16mf2x3(base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16mf2x3(rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16m1x3(base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16m1x3(rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16m2x3(base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16m2x3(rs1, rs2, vl);
 }

-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16mf4x3(base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16mf4x3(rs1, rs2, vl);
 }

-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16mf2x3(base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16mf2x3(rs1, rs2, vl);
 }

-vint16m1x3_t test_vlsseg3e16_v_i16m1x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16m1x3(base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16m1x3(rs1, rs2, vl);
 }

-vint16m2x3_t test_vlsseg3e16_v_i16m2x3(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16m2x3(base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16m2x3(rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16mf4x3(base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16mf4x3(rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16mf2x3(base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16mf2x3(rs1, rs2, vl);
 }

-vuint16m1x3_t test_vlsseg3e16_v_u16m1x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16m1x3(base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16m1x3(rs1, rs2, vl);
 }

-vuint16m2x3_t test_vlsseg3e16_v_u16m2x3(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16m2x3(base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16m2x3(rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16mf4x3_m(mask, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16mf4x3_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16mf2x3_m(mask, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16m1x3_m(mask, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_f16m2x3_m(mask, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_f16m2x3_m(vm, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16mf4x3_m(mask, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16mf4x3_m(vm, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16mf2x3_m(mask, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16mf2x3_m(vm, rs1, rs2, vl);
 }

-vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16m1x3_m(mask, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16m1x3_m(vm, rs1, rs2, vl);
 }

-vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_i16m2x3_m(mask, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_i16m2x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16mf4x3_m(mask, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16mf2x3_m(mask, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16m1x3_m(mask, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16m1x3_m(vm, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e16_v_u16m2x3_m(mask, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_v_u16m2x3_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e16\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg3e32.c b/auto-generated/gnu-api-tests/vlsseg3e32.c
index 2650ea9e2..82a560955 100644
--- a/auto-generated/gnu-api-tests/vlsseg3e32.c
+++ b/auto-generated/gnu-api-tests/vlsseg3e32.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_f32mf2x3(base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_f32mf2x3(rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_f32m1x3(base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_f32m1x3(rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_f32m2x3(base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_f32m2x3(rs1, rs2, vl);
 }

-vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32mf2x3(base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32mf2x3(rs1, rs2, vl);
 }

-vint32m1x3_t test_vlsseg3e32_v_i32m1x3(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32m1x3(base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32m1x3(rs1, rs2, vl);
 }

-vint32m2x3_t test_vlsseg3e32_v_i32m2x3(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32m2x3(base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32m2x3(rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32mf2x3(base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32mf2x3(rs1, rs2, vl);
 }

-vuint32m1x3_t test_vlsseg3e32_v_u32m1x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m1x3(base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m1x3(rs1, rs2, vl);
 }

-vuint32m2x3_t test_vlsseg3e32_v_u32m2x3(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m2x3(base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m2x3(rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_f32mf2x3_m(mask, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_f32mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_f32m1x3_m(mask, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_f32m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_f32m2x3_m(mask, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_f32m2x3_m(vm, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32mf2x3_m(mask, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32mf2x3_m(vm, rs1, rs2, vl);
 }

-vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32m1x3_m(mask, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32m1x3_m(vm, rs1, rs2, vl);
 }

-vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_i32m2x3_m(mask, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_i32m2x3_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32mf2x3_m(mask, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m1x3_m(mask, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m1x3_m(vm, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e32_v_u32m2x3_m(mask, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e32_v_u32m2x3_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e32\.[ivxfswum.]+\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg3e64.c b/auto-generated/gnu-api-tests/vlsseg3e64.c
index 16c29b134..cc187ca46 100644
--- a/auto-generated/gnu-api-tests/vlsseg3e64.c
+++ b/auto-generated/gnu-api-tests/vlsseg3e64.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3(const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_f64m1x3(base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_f64m1x3(rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3(const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_f64m2x3(base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3(const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_f64m2x3(rs1, rs2, vl);
 }

-vint64m1x3_t test_vlsseg3e64_v_i64m1x3(const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_i64m1x3(base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_i64m1x3(rs1, rs2, vl);
 }

-vint64m2x3_t test_vlsseg3e64_v_i64m2x3(const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_i64m2x3(base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3(const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_i64m2x3(rs1, rs2, vl);
 }

-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_u64m1x3(base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_u64m1x3(rs1, rs2, vl);
 }

-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3(const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_u64m2x3(base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_u64m2x3(rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_f64m1x3_m(mask, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_f64m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_f64m2x3_m(mask, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_f64m2x3_m(vm, rs1, rs2, vl);
 }

-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_i64m1x3_m(mask, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_i64m1x3_m(vm, rs1, rs2, vl);
 }

-vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_i64m2x3_m(mask, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_i64m2x3_m(vm, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_u64m1x3_m(mask, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_u64m1x3_m(vm, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e64_v_u64m2x3_m(mask, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e64_v_u64m2x3_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg3e8.c b/auto-generated/gnu-api-tests/vlsseg3e8.c
index 13a5f40f9..0978ac15e 100644
--- a/auto-generated/gnu-api-tests/vlsseg3e8.c
+++ b/auto-generated/gnu-api-tests/vlsseg3e8.c
@@ -6,84 +6,84 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8mf8x3(base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8mf8x3(rs1, rs2, vl);
 }

-vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8mf4x3(base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8mf4x3(rs1, rs2, vl);
 }

-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8mf2x3(base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8mf2x3(rs1, rs2, vl);
 }

-vint8m1x3_t test_vlsseg3e8_v_i8m1x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8m1x3(base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8m1x3(rs1, rs2, vl);
 }

-vint8m2x3_t test_vlsseg3e8_v_i8m2x3(const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8m2x3(base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3(const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8m2x3(rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8mf8x3(base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8mf8x3(rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8mf4x3(base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8mf4x3(rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8mf2x3(base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8mf2x3(rs1, rs2, vl);
 }

-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8m1x3(base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8m1x3(rs1, rs2, vl);
 }

-vuint8m2x3_t test_vlsseg3e8_v_u8m2x3(const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8m2x3(base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8m2x3(rs1, rs2, vl);
 }

-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8mf8x3_m(mask, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8mf8x3_m(vm, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8mf4x3_m(mask, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8mf4x3_m(vm, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8mf2x3_m(mask, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8mf2x3_m(vm, rs1, rs2, vl);
 }

-vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8m1x3_m(mask, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8m1x3_m(vm, rs1, rs2, vl);
 }

-vint8m2x3_t test_vlsseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_i8m2x3_m(mask, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_i8m2x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8mf8x3_m(mask, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8mf8x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8mf4x3_m(mask, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8mf2x3_m(mask, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8m1x3_m(mask, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8m1x3_m(vm, rs1, rs2, vl);
 }

-vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg3e8_v_u8m2x3_m(mask, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e8_v_u8m2x3_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e8\.[ivxfswum.]+\s+} 20 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg4e16.c b/auto-generated/gnu-api-tests/vlsseg4e16.c
index 75de9ab19..0c50ea6a1 100644
--- a/auto-generated/gnu-api-tests/vlsseg4e16.c
+++ b/auto-generated/gnu-api-tests/vlsseg4e16.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16mf4x4(base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16mf4x4(rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16mf2x4(base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16mf2x4(rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16m1x4(base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m1x4(rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4(const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16m2x4(base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4(const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m2x4(rs1, rs2, vl);
 }

-vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16mf4x4(base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf4x4(rs1, rs2, vl);
 }

-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16mf2x4(base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf2x4(rs1, rs2, vl);
 }

-vint16m1x4_t test_vlsseg4e16_v_i16m1x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16m1x4(base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m1x4(rs1, rs2, vl);
 }

-vint16m2x4_t test_vlsseg4e16_v_i16m2x4(const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16m2x4(base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4(const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m2x4(rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16mf4x4(base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf4x4(rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16mf2x4(base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf2x4(rs1, rs2, vl);
 }

-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16m1x4(base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m1x4(rs1, rs2, vl);
 }

-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4(const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16m2x4(base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m2x4(rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16mf4x4_m(mask, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16mf4x4_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16mf2x4_m(mask, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16m1x4_m(mask, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_f16m2x4_m(mask, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_f16m2x4_m(vm, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16mf4x4_m(mask, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf4x4_m(vm, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16mf2x4_m(mask, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16mf2x4_m(vm, rs1, rs2, vl);
 }

-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16m1x4_m(mask, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m1x4_m(vm, rs1, rs2, vl);
 }

-vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_i16m2x4_m(mask, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_i16m2x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16mf4x4_m(mask, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf4x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16mf2x4_m(mask, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16m1x4_m(mask, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m1x4_m(vm, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e16_v_u16m2x4_m(mask, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_v_u16m2x4_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e16\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vlsseg4e32.c b/auto-generated/gnu-api-tests/vlsseg4e32.c
index f6c1d0714..320efeb9f 100644
--- a/auto-generated/gnu-api-tests/vlsseg4e32.c
+++ b/auto-generated/gnu-api-tests/vlsseg4e32.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32mf2x4(base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32mf2x4(rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m1x4(base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m1x4(rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4(const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m2x4(base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4(const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m2x4(rs1, rs2, vl);
 }

-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32mf2x4(base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32mf2x4(rs1, rs2, vl);
 }

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m1x4(base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m1x4(rs1, rs2, vl);
 }

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4(const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m2x4(base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4(const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m2x4(rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32mf2x4(base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32mf2x4(rs1, rs2, vl);
 }

-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m1x4(base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32m1x4(rs1, rs2, vl);
 }

-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4(const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32m2x4(base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_u32m2x4(rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32mf2x4_m(mask, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m1x4_m(mask, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_f32m2x4_m(mask, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_f32m2x4_m(vm, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32mf2x4_m(mask, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32mf2x4_m(vm, rs1, rs2, vl);
 }

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m1x4_m(mask, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m1x4_m(vm, rs1, rs2, vl);
 }

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_i32m2x4_m(mask, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e32_v_i32m2x4_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlsseg4e32_v_u32mf2x4_m(mask, base, bstride, vl);
+vuint32mf2x4_t
test_vlsseg4e32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32_v_u32mf2x4_m(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m1x4_m(mask, base, bstride, vl); +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32_v_u32m1x4_m(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32_v_u32m2x4_m(mask, base, bstride, vl); +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32_v_u32m2x4_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg4e64.c b/auto-generated/gnu-api-tests/vlsseg4e64.c index dc313a5ea..58c0ed32f 100644 --- a/auto-generated/gnu-api-tests/vlsseg4e64.c +++ b/auto-generated/gnu-api-tests/vlsseg4e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4(const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1x4(base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4(const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4(rs1, rs2, vl); } -vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4(const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2x4(base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4(const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4(rs1, rs2, vl); } -vint64m1x4_t test_vlsseg4e64_v_i64m1x4(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1x4(base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4(const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4(rs1, rs2, vl); } -vint64m2x4_t test_vlsseg4e64_v_i64m2x4(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2x4(base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4(const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4(rs1, rs2, vl); } -vuint64m1x4_t test_vlsseg4e64_v_u64m1x4(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1x4(base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4(rs1, rs2, vl); } -vuint64m2x4_t test_vlsseg4e64_v_u64m2x4(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2x4(base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4(rs1, rs2, vl); } -vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m1x4_m(mask, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_f64m1x4_m(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_f64m2x4_m(mask, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_f64m2x4_m(vm, rs1, rs2, vl); } -vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m1x4_m(mask, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_i64m1x4_m(vm, rs1, rs2, vl); } -vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_i64m2x4_m(mask, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_i64m2x4_m(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m1x4_m(mask, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_u64m1x4_m(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64_v_u64m2x4_m(mask, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64_v_u64m2x4_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg4e8.c b/auto-generated/gnu-api-tests/vlsseg4e8.c index e21de9259..034f93144 100644 --- a/auto-generated/gnu-api-tests/vlsseg4e8.c +++ b/auto-generated/gnu-api-tests/vlsseg4e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8x4(base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4(rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4x4(base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4(rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2x4(base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4(rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1x4(base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4(rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2x4(base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4(rs1, rs2, vl); } 
-vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8x4(base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4(rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4x4(base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4(rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2x4(base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4(rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1x4(base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4(rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2x4(base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4(rs1, rs2, vl); } -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8x4_m(mask, base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_m(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4x4_m(mask, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_m(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2x4_m(mask, base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_m(vm, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1x4_m(mask, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_m(vm, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2x4_m(mask, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_m(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8x4_m(mask, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_m(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, 
size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4x4_m(mask, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_m(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2x4_m(mask, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_m(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1x4_m(mask, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_m(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2x4_m(mask, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg5e16.c b/auto-generated/gnu-api-tests/vlsseg5e16.c index e37ae4ee1..255a48ddb 100644 --- a/auto-generated/gnu-api-tests/vlsseg5e16.c +++ b/auto-generated/gnu-api-tests/vlsseg5e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4x5(base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5(rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2x5(base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5(rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1x5(base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5(rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4x5(base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5(rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2x5(base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5(rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1x5(base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5(rs1, rs2, vl); } -vuint16mf4x5_t 
test_vlsseg5e16_v_u16mf4x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4x5(base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5(rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2x5(base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5(rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1x5(base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5(rs1, rs2, vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4x5_m(mask, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_m(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2x5_m(mask, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_m(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1x5_m(mask, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_m(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4x5_m(mask, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_m(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2x5_m(mask, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_m(vm, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1x5_m(mask, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_m(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4x5_m(mask, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_m(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2x5_m(mask, base, bstride, vl); +vuint16mf2x5_t 
test_vlsseg5e16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_m(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1x5_m(mask, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg5e32.c b/auto-generated/gnu-api-tests/vlsseg5e32.c index 2c2c14378..f85291371 100644 --- a/auto-generated/gnu-api-tests/vlsseg5e32.c +++ b/auto-generated/gnu-api-tests/vlsseg5e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2x5(base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5(rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1x5(base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5(rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2x5(base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5(rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1x5(base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5(rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2x5(base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5(rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1x5(base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5(rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2x5_m(mask, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_m(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1x5_m(mask, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_m(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t mask, 
const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2x5_m(mask, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_m(vm, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1x5_m(mask, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_m(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2x5_m(mask, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_m(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1x5_m(mask, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg5e64.c b/auto-generated/gnu-api-tests/vlsseg5e64.c index a8323fd66..71e322ff4 100644 --- a/auto-generated/gnu-api-tests/vlsseg5e64.c +++ b/auto-generated/gnu-api-tests/vlsseg5e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5(const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1x5(base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5(const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5(rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1x5(base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5(const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5(rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1x5(base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5(rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1x5_m(mask, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_m(vm, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1x5_m(mask, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_m(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1x5_m(mask, 
base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg5e8.c b/auto-generated/gnu-api-tests/vlsseg5e8.c index 17fc919d3..8c9c21a00 100644 --- a/auto-generated/gnu-api-tests/vlsseg5e8.c +++ b/auto-generated/gnu-api-tests/vlsseg5e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8x5(base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5(rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4x5(base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5(rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2x5(base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5(rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1x5(base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5(rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8x5(base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5(rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4x5(base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5(rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2x5(base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5(rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1x5(base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5(rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8x5_m(mask, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_m(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4x5_m(mask, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t vm, const int8_t 
*rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_m(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2x5_m(mask, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_m(vm, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1x5_m(mask, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_m(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8x5_m(mask, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_m(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4x5_m(mask, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_m(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2x5_m(mask, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_m(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1x5_m(mask, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg6e16.c b/auto-generated/gnu-api-tests/vlsseg6e16.c index 818f3563a..e29ce8fa0 100644 --- a/auto-generated/gnu-api-tests/vlsseg6e16.c +++ b/auto-generated/gnu-api-tests/vlsseg6e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4x6(base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6(rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2x6(base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6(rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1x6(base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6(rs1, rs2, vl); } -vint16mf4x6_t 
test_vlsseg6e16_v_i16mf4x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4x6(base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6(rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2x6(base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6(rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1x6(base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4x6(base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2x6(base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1x6(base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf4x6_m(mask, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16mf2x6_m(mask, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_f16m1x6_m(mask, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_f16m1x6_m(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf4x6_m(mask, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf4x6_m(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16mf2x6_m(mask, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_i16mf2x6_m(vm, rs1, rs2, vl); } 
-vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_i16m1x6_m(mask, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_i16m1x6_m(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf4x6_m(mask, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf4x6_m(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16mf2x6_m(mask, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_u16mf2x6_m(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_v_u16m1x6_m(mask, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_u16m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg6e32.c b/auto-generated/gnu-api-tests/vlsseg6e32.c index 0f0c99601..3296d23b2 100644 --- a/auto-generated/gnu-api-tests/vlsseg6e32.c +++ b/auto-generated/gnu-api-tests/vlsseg6e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2x6(base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6(rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1x6(base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6(rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2x6(base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6(rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1x6(base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2x6(base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1x6(base, bstride, vl); +vuint32m1x6_t 
test_vlsseg6e32_v_u32m1x6(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6(rs1, rs2, vl); } -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32mf2x6_m(mask, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_f32mf2x6_m(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_f32m1x6_m(mask, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_f32m1x6_m(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32mf2x6_m(mask, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_i32mf2x6_m(vm, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_i32m1x6_m(mask, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_i32m1x6_m(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32mf2x6_m(mask, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_u32mf2x6_m(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_v_u32m1x6_m(mask, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_v_u32m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg6e64.c b/auto-generated/gnu-api-tests/vlsseg6e64.c index 8f7758196..d9aac07bc 100644 --- a/auto-generated/gnu-api-tests/vlsseg6e64.c +++ b/auto-generated/gnu-api-tests/vlsseg6e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6(const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1x6(base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6(const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6(rs1, rs2, vl); } -vint64m1x6_t test_vlsseg6e64_v_i64m1x6(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1x6(base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6(const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vlsseg6e64_v_u64m1x6(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1x6(base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg6e64_v_u64m1x6(rs1, rs2, vl); } -vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_f64m1x6_m(mask, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64_v_f64m1x6_m(vm, rs1, rs2, vl); } -vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_i64m1x6_m(mask, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64_v_i64m1x6_m(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64_v_u64m1x6_m(mask, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64_v_u64m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg6e8.c b/auto-generated/gnu-api-tests/vlsseg6e8.c index f6b903d93..be0c486b3 100644 --- a/auto-generated/gnu-api-tests/vlsseg6e8.c +++ b/auto-generated/gnu-api-tests/vlsseg6e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8x6(base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6(rs1, rs2, vl); } -vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4x6(base, bstride, vl); +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6(rs1, rs2, vl); } -vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2x6(base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6(rs1, rs2, vl); } -vint8m1x6_t test_vlsseg6e8_v_i8m1x6(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1x6(base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6(rs1, rs2, vl); } -vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8x6(base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6(rs1, rs2, vl); } -vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4x6(base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6(rs1, rs2, vl); } -vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2x6(base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg6e8_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vlsseg6e8_v_u8m1x6(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1x6(base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6(rs1, rs2, vl); } -vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf8x6_m(mask, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf8x6_m(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf4x6_m(mask, base, bstride, vl); +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf4x6_m(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8mf2x6_m(mask, base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8mf2x6_m(vm, rs1, rs2, vl); } -vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_i8m1x6_m(mask, base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_i8m1x6_m(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf8x6_m(mask, base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf8x6_m(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf4x6_m(mask, base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf4x6_m(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8mf2x6_m(mask, base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_u8mf2x6_m(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8_v_u8m1x6_m(mask, base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8_v_u8m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg7e16.c b/auto-generated/gnu-api-tests/vlsseg7e16.c index 8270f0b9e..694a747ef 100644 --- a/auto-generated/gnu-api-tests/vlsseg7e16.c +++ b/auto-generated/gnu-api-tests/vlsseg7e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7(const 
float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4x7(base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7(rs1, rs2, vl); } -vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2x7(base, bstride, vl); +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7(rs1, rs2, vl); } -vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1x7(base, bstride, vl); +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7(rs1, rs2, vl); } -vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4x7(base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7(rs1, rs2, vl); } -vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2x7(base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7(rs1, rs2, vl); } -vint16m1x7_t test_vlsseg7e16_v_i16m1x7(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1x7(base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7(rs1, rs2, vl); } -vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4x7(base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7(rs1, rs2, vl); } -vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2x7(base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7(rs1, rs2, vl); } -vuint16m1x7_t test_vlsseg7e16_v_u16m1x7(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1x7(base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7(rs1, rs2, vl); } -vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf4x7_m(mask, base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf4x7_m(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16mf2x7_m(mask, base, bstride, vl); +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_f16mf2x7_m(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_f16m1x7_m(mask, base, 
bstride, vl); +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_f16m1x7_m(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf4x7_m(mask, base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf4x7_m(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16mf2x7_m(mask, base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_i16mf2x7_m(vm, rs1, rs2, vl); } -vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_i16m1x7_m(mask, base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_i16m1x7_m(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf4x7_m(mask, base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf4x7_m(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16mf2x7_m(mask, base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_u16mf2x7_m(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16_v_u16m1x7_m(mask, base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_u16m1x7_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg7e32.c b/auto-generated/gnu-api-tests/vlsseg7e32.c index 4e6321ed5..70359e92d 100644 --- a/auto-generated/gnu-api-tests/vlsseg7e32.c +++ b/auto-generated/gnu-api-tests/vlsseg7e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2x7(base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7(rs1, rs2, vl); } -vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1x7(base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7(rs1, rs2, vl); } -vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2x7(base, bstride, vl); +vint32mf2x7_t 
test_vlsseg7e32_v_i32mf2x7(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7(rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1x7(base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7(rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2x7(base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7(rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1x7(base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7(rs1, rs2, vl); } -vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32mf2x7_m(mask, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_f32mf2x7_m(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_f32m1x7_m(mask, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_m(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2x7_m(mask, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_m(vm, rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1x7_m(mask, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7_m(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2x7_m(mask, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_m(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1x7_m(mask, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg7e64.c b/auto-generated/gnu-api-tests/vlsseg7e64.c index 0341e3c8c..5c9a07f4a 100644 --- a/auto-generated/gnu-api-tests/vlsseg7e64.c +++ b/auto-generated/gnu-api-tests/vlsseg7e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; 
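For reference, the vlsseg7e64 tests that follow exercise strided segment loads that gather seven 64-bit fields per segment, with the base pointer now named rs1 and the byte stride rs2. A minimal usage sketch, assuming a packed array of seven-double records; the aos pointer, record layout, stride value, and function name are illustrative assumptions, not part of the generated tests:

#include <riscv_vector.h>
#include <stddef.h>

void sketch_vlsseg7e64(const double *aos, size_t n) {
  size_t vl = __riscv_vsetvl_e64m1(n);
  /* rs1 = base address, rs2 = byte stride between consecutive segments
     (assumed here to be one packed seven-double record). */
  vfloat64m1x7_t seg =
      __riscv_vlsseg7e64_v_f64m1x7(aos, (ptrdiff_t)(7 * sizeof(double)), vl);
  (void)seg; /* Individual fields would normally be extracted from the tuple. */
}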
typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7(const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1x7(base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7(const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7(rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1x7(base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7(const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7(rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1x7(base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7(rs1, rs2, vl); } -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1x7_m(mask, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_m(vm, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1x7_m(mask, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_m(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1x7_m(mask, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg7e8.c b/auto-generated/gnu-api-tests/vlsseg7e8.c index 8e0fa73f9..0675a0b3c 100644 --- a/auto-generated/gnu-api-tests/vlsseg7e8.c +++ b/auto-generated/gnu-api-tests/vlsseg7e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8x7(base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7(rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4x7(base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7(rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2x7(base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7(rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1x7(base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + 
return __riscv_vlsseg7e8_v_i8m1x7(rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8x7(base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7(rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4x7(base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7(rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2x7(base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7(rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1x7(base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7(rs1, rs2, vl); } -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8x7_m(mask, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_m(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4x7_m(mask, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_m(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2x7_m(mask, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_m(vm, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1x7_m(mask, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_m(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8x7_m(mask, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_m(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4x7_m(mask, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_m(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2x7_m(mask, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_m(vm, 
rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1x7_m(mask, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg8e16.c b/auto-generated/gnu-api-tests/vlsseg8e16.c index 22790abd9..161d55fce 100644 --- a/auto-generated/gnu-api-tests/vlsseg8e16.c +++ b/auto-generated/gnu-api-tests/vlsseg8e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4x8(base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2x8(base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8(const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1x8(base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8(const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8(rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4x8(base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8(rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2x8(base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8(rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8(const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1x8(base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8(const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8(rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4x8(base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8(rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2x8(base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8(rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8(const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1x8(base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8(const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8(rs1, rs2, vl); } 
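The masked variants that follow take the mask as a leading vm operand of the matching vbool type (vbool16_t for e16 at LMUL=1). A minimal sketch of a masked call, mirroring the signature shown below; the wrapper name is illustrative:

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

/* Loads eight 16-bit fields per segment, only for segments whose
   mask bit in vm is set. */
vuint16m1x8_t sketch_vlsseg8e16_m(vbool16_t vm, const uint16_t *rs1,
                                  ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg8e16_v_u16m1x8_m(vm, rs1, rs2, vl);
}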
-vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4x8_m(mask, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_m(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2x8_m(mask, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_m(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1x8_m(mask, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_m(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4x8_m(mask, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_m(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2x8_m(mask, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_m(vm, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1x8_m(mask, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_m(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4x8_m(mask, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_m(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2x8_m(mask, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_m(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1x8_m(mask, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg8e32.c b/auto-generated/gnu-api-tests/vlsseg8e32.c index f59403e80..208311dca 100644 --- a/auto-generated/gnu-api-tests/vlsseg8e32.c +++ b/auto-generated/gnu-api-tests/vlsseg8e32.c @@ -6,52 +6,52 @@ 
typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2x8(base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8(const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1x8(base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8(const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8(rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2x8(base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8(rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8(const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1x8(base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8(const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8(rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2x8(base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8(rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8(const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1x8(base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8(const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2x8_m(mask, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_m(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1x8_m(mask, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_m(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2x8_m(mask, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_m(vm, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1x8_m(mask, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_m(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2x8_m(mask, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t 
rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_m(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1x8_m(mask, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg8e64.c b/auto-generated/gnu-api-tests/vlsseg8e64.c index 1852c9e76..3ac423653 100644 --- a/auto-generated/gnu-api-tests/vlsseg8e64.c +++ b/auto-generated/gnu-api-tests/vlsseg8e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8(const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1x8(base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8(const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8(rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8(const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1x8(base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8(const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8(rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8(const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1x8(base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8(const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1x8_m(mask, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_m(vm, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1x8_m(mask, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_m(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1x8_m(mask, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vlsseg8e8.c b/auto-generated/gnu-api-tests/vlsseg8e8.c index f4048703d..5563a1f04 100644 --- a/auto-generated/gnu-api-tests/vlsseg8e8.c +++ b/auto-generated/gnu-api-tests/vlsseg8e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8x8(base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + 
return __riscv_vlsseg8e8_v_i8mf8x8(rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4x8(base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8(rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2x8(base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8(rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8(const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1x8(base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8(const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8(rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8x8(base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8(rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4x8(base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8(rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2x8(base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8(rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8(const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1x8(base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8(const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8x8_m(mask, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_m(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4x8_m(mask, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_m(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2x8_m(mask, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_m(vm, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1x8_m(mask, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_m(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e8_v_u8mf8x8_m(mask, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_m(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4x8_m(mask, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_m(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2x8_m(mask, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_m(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1x8_m(mask, base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vluxei16.c b/auto-generated/gnu-api-tests/vluxei16.c index 9e42e9c58..a6a422ab5 100644 --- a/auto-generated/gnu-api-tests/vluxei16.c +++ b/auto-generated/gnu-api-tests/vluxei16.c @@ -6,460 +6,460 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei16_v_f16mf4(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16mf4(base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16mf4(rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16mf2(base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16mf2(rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m1(base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m1(rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m2(base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m2(rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4(const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m4(base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4(const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m4(rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8(const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m8(base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8(const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m8(rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
__riscv_vluxei16_v_f32mf2(base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32mf2(rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m1(base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m1(rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m2(base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m2(rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4(const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m4(base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4(const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m4(rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8(const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m8(base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8(const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m8(rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m1(base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m1(rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m2(base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m2(rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4(const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m4(base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4(const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m4(rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8(const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m8(base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8(const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m8(rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf8(base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf8(rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf4(base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf4(rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf2(base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf2(rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m1(base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m1(rs1, rs2, 
vl); } -vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m2(base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m2(rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m4(base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4(const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m4(rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16mf4(base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16mf4(rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16mf2(base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16mf2(rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m1(base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m1(rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m2(base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m2(rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m4(base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4(const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m4(rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m8(base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8(const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m8(rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32mf2(base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32mf2(rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m1(base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m1(rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m2(base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m2(rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m4(base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4(const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m4(rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m8(base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8(const int32_t *rs1, vuint16m4_t rs2, size_t vl) { 
+ return __riscv_vluxei16_v_i32m8(rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m1(base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m1(rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m2(base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m2(rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m4(base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4(const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m4(rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m8(base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8(const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m8(rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf8(base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf8(rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf4(base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf4(rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf2(base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf2(rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m1(base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m1(rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m2(base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m2(rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m4(base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m4(rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf4(base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf4(rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf2(base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf2(rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m1(base, bindex, vl); +vuint16m1_t 
test_vluxei16_v_u16m1(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m1(rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m2(base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m2(rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m4(base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m4(rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m8(base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m8(rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32mf2(base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32mf2(rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m1(base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m1(rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m2(base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m2(rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m4(base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m4(rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m8(base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m8(rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m1(base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m1(rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m2(base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m2(rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m4(base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m4(rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m8(base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m8(rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, 
const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16mf4_m(mask, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16mf4_m(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16mf2_m(mask, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16mf2_m(vm, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m1_m(mask, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m1_m(vm, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m2_m(mask, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m2_m(vm, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m4_m(mask, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m4_m(vm, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m8_m(mask, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m8_m(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32mf2_m(mask, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32mf2_m(vm, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m1_m(mask, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m1_m(vm, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m2_m(mask, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m2_m(vm, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m4_m(mask, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m4_m(vm, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m8_m(mask, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { 
+ return __riscv_vluxei16_v_f32m8_m(vm, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m1_m(mask, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m1_m(vm, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m2_m(mask, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m2_m(vm, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m4_m(mask, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m4_m(vm, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m8_m(mask, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m8_m(vm, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf8_m(mask, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf8_m(vm, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf4_m(mask, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf4_m(vm, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf2_m(mask, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf2_m(vm, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m1_m(mask, base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m1_m(vm, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m2_m(mask, base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m2_m(vm, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m4_m(mask, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m4_m(vm, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16mf4_m(mask, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t 
rs2, size_t vl) { + return __riscv_vluxei16_v_i16mf4_m(vm, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16mf2_m(mask, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16mf2_m(vm, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m1_m(mask, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m1_m(vm, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m2_m(mask, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m2_m(vm, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m4_m(mask, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m4_m(vm, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m8_m(mask, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m8_m(vm, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32mf2_m(mask, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32mf2_m(vm, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m1_m(mask, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m1_m(vm, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m2_m(mask, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m2_m(vm, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m4_m(mask, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m4_m(vm, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m8_m(mask, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m8_m(vm, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m1_m(mask, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t vm, const 
int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m1_m(vm, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m2_m(mask, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m2_m(vm, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m4_m(mask, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m4_m(vm, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m8_m(mask, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m8_m(vm, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf8_m(mask, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf8_m(vm, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf4_m(mask, base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf4_m(vm, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf2_m(mask, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf2_m(vm, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m1_m(mask, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m1_m(vm, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m2_m(mask, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m2_m(vm, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m4_m(mask, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m4_m(vm, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf4_m(mask, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf4_m(vm, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf2_m(mask, base, bindex, vl); +vuint16mf2_t 
test_vluxei16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf2_m(vm, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m1_m(mask, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m1_m(vm, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m2_m(mask, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m2_m(vm, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m4_m(mask, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m4_m(vm, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m8_m(mask, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m8_m(vm, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32mf2_m(mask, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32mf2_m(vm, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m1_m(mask, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m1_m(vm, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m2_m(mask, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m2_m(vm, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m4_m(mask, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m4_m(vm, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m8_m(mask, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m8_m(vm, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m1_m(mask, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m1_m(vm, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxei16_v_u64m2_m(mask, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m2_m(vm, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m4_m(mask, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m4_m(vm, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m8_m(mask, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei16\.[ivxfswum.]+\s+} 114 } } */ diff --git a/auto-generated/gnu-api-tests/vluxei32.c b/auto-generated/gnu-api-tests/vluxei32.c index df6c15179..633ad43c8 100644 --- a/auto-generated/gnu-api-tests/vluxei32.c +++ b/auto-generated/gnu-api-tests/vluxei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei32_v_f16mf4(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf4(base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf4(rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf2(base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf2(rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m1(base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m1(rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m2(base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m2(rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4(const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m4(base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4(const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m4(rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32mf2(base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32mf2(rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m1(base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m1(rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m2(base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2(const float32_t *rs1, 
vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m2(rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4(const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m4(base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4(const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m4(rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8(const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m8(base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8(const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m8(rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m1(base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m1(rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m2(base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m2(rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4(const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m4(base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4(const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m4(rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8(const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m8(base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8(const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m8(rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf8(base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf8(rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf4(base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf4(rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf2(base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf2(rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m1(base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m1(rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m2(base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m2(rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf4(base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf4(rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vluxei32_v_i16mf2(base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf2(rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m1(base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m1(rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m2(base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m2(rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m4(base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4(const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m4(rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32mf2(base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32mf2(rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m1(base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m1(rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m2(base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m2(rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m4(base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4(const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m4(rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m8(base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8(const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m8(rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m1(base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m1(rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m2(base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m2(rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m4(base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4(const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m4(rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m8(base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8(const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m8(rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t 
*base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf8(base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf8(rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf4(base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf4(rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf2(base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf2(rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m1(base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m1(rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m2(base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m2(rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf4(base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf4(rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf2(base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf2(rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m1(base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m1(rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m2(base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m2(rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m4(base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m4(rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32mf2(base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32mf2(rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m1(base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m1(rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m2(base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vluxei32_v_u32m2(rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m4(base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m4(rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m8(base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m8(rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m1(base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m1(rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m2(base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m2(rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m4(base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m4(rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m8(base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m8(rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf4_m(mask, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf4_m(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf2_m(mask, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf2_m(vm, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m1_m(mask, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m1_m(vm, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m2_m(mask, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m2_m(vm, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m4_m(mask, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m4_m(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32mf2_m(mask, 
base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32mf2_m(vm, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m1_m(mask, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m1_m(vm, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m2_m(mask, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m2_m(vm, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m4_m(mask, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m4_m(vm, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m8_m(mask, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m8_m(vm, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m1_m(mask, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m1_m(vm, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m2_m(mask, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m2_m(vm, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m4_m(mask, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m4_m(vm, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m8_m(mask, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m8_m(vm, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf8_m(mask, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf8_m(vm, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf4_m(mask, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf4_m(vm, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, 
vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf2_m(mask, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf2_m(vm, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m1_m(mask, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m1_m(vm, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m2_m(mask, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m2_m(vm, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf4_m(mask, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf4_m(vm, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf2_m(mask, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf2_m(vm, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m1_m(mask, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m1_m(vm, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m2_m(mask, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m2_m(vm, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m4_m(mask, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m4_m(vm, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32mf2_m(mask, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32mf2_m(vm, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m1_m(mask, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m1_m(vm, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m2_m(mask, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m2_m(vm, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t 
*base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m4_m(mask, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m4_m(vm, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m8_m(mask, base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m8_m(vm, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m1_m(mask, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m1_m(vm, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m2_m(mask, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m2_m(vm, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m4_m(mask, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m4_m(vm, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m8_m(mask, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m8_m(vm, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf8_m(mask, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf8_m(vm, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf4_m(mask, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf4_m(vm, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf2_m(mask, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf2_m(vm, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m1_m(mask, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m1_m(vm, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m2_m(mask, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m2_m(vm, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, const 
uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf4_m(mask, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf4_m(vm, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf2_m(mask, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf2_m(vm, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m1_m(mask, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m1_m(vm, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m2_m(mask, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m2_m(vm, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m4_m(mask, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m4_m(vm, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32mf2_m(mask, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32mf2_m(vm, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m1_m(mask, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m1_m(vm, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m2_m(mask, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m2_m(vm, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m4_m(mask, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m4_m(vm, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m8_m(mask, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m8_m(vm, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m1_m(mask, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m1_m(vm, rs1, 
rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m2_m(mask, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m2_m(vm, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m4_m(mask, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m4_m(vm, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m8_m(mask, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/gnu-api-tests/vluxei64.c b/auto-generated/gnu-api-tests/vluxei64.c index 0e0a8dc57..15f769ddc 100644 --- a/auto-generated/gnu-api-tests/vluxei64.c +++ b/auto-generated/gnu-api-tests/vluxei64.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei64_v_f16mf4(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf4(base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf4(rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf2(base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf2(rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m1(base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m1(rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m2(base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m2(rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32mf2(base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32mf2(rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m1(base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m1(rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m2(base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m2(rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4(const float32_t *base, vuint64m8_t bindex, 
size_t vl) { - return __riscv_vluxei64_v_f32m4(base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4(const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m4(rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m1(base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m1(rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m2(base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m2(rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4(const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m4(base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4(const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m4(rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8(const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m8(base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8(const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m8(rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf8(base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf8(rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf4(base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf4(rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf2(base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf2(rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8m1(base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8m1(rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf4(base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf4(rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf2(base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf2(rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m1(base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m1(rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m2(base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m2(rs1, rs2, vl); } 
-vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32mf2(base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32mf2(rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m1(base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m1(rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m2(base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m2(rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m4(base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4(const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m4(rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m1(base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m1(rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m2(base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m2(rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m4(base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4(const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m4(rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m8(base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8(const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m8(rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf8(base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf8(rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf4(base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf4(rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf2(base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf2(rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8m1(base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8m1(rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf4(base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { 
+ return __riscv_vluxei64_v_u16mf4(rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf2(base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16mf2(rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m1(base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m1(rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m2(base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m2(rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32mf2(base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32mf2(rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m1(base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m1(rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m2(base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m2(rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m4(base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m4(rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m1(base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m1(rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m2(base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m2(rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m4(base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m4(rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m8(base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m8(rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf4_m(mask, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf4_m(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const float16_t 
*base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf2_m(mask, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf2_m(vm, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m1_m(mask, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m1_m(vm, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m2_m(mask, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m2_m(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32mf2_m(mask, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32mf2_m(vm, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m1_m(mask, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m1_m(vm, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m2_m(mask, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m2_m(vm, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m4_m(mask, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m4_m(vm, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m1_m(mask, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m1_m(vm, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m2_m(mask, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m2_m(vm, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m4_m(mask, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m4_m(vm, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m8_m(mask, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return 
__riscv_vluxei64_v_f64m8_m(vm, rs1, rs2, vl);
 }
-vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i8mf8_m(mask, base, bindex, vl);
+vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i8mf8_m(vm, rs1, rs2, vl);
 }
-vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i8mf4_m(mask, base, bindex, vl);
+vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i8mf4_m(vm, rs1, rs2, vl);
 }
-vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i8mf2_m(mask, base, bindex, vl);
+vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i8mf2_m(vm, rs1, rs2, vl);
 }
-vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i8m1_m(mask, base, bindex, vl);
+vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i8m1_m(vm, rs1, rs2, vl);
 }
-vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i16mf4_m(mask, base, bindex, vl);
+vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i16mf4_m(vm, rs1, rs2, vl);
 }
-vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i16mf2_m(mask, base, bindex, vl);
+vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i16mf2_m(vm, rs1, rs2, vl);
 }
-vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i16m1_m(mask, base, bindex, vl);
+vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i16m1_m(vm, rs1, rs2, vl);
 }
-vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i16m2_m(mask, base, bindex, vl);
+vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i16m2_m(vm, rs1, rs2, vl);
 }
-vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i32mf2_m(mask, base, bindex, vl);
+vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i32mf2_m(vm, rs1, rs2, vl);
 }
-vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i32m1_m(mask, base, bindex, vl);
+vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i32m1_m(vm, rs1, rs2, vl);
 }
-vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i32m2_m(mask, base, bindex, vl);
+vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i32m2_m(vm, rs1, rs2, vl);
 }
-vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i32m4_m(mask, base, bindex, vl);
+vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i32m4_m(vm, rs1, rs2, vl);
 }
-vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m1_m(mask, base, bindex, vl);
+vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m1_m(vm, rs1, rs2, vl);
 }
-vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m2_m(mask, base, bindex, vl);
+vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m2_m(vm, rs1, rs2, vl);
 }
-vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m4_m(mask, base, bindex, vl);
+vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m4_m(vm, rs1, rs2, vl);
 }
-vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m8_m(mask, base, bindex, vl);
+vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m8_m(vm, rs1, rs2, vl);
 }
-vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8mf8_m(mask, base, bindex, vl);
+vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8mf8_m(vm, rs1, rs2, vl);
 }
-vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8mf4_m(mask, base, bindex, vl);
+vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8mf4_m(vm, rs1, rs2, vl);
 }
-vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8mf2_m(mask, base, bindex, vl);
+vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8mf2_m(vm, rs1, rs2, vl);
 }
-vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8m1_m(mask, base, bindex, vl);
+vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8m1_m(vm, rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16mf4_m(mask, base, bindex, vl);
+vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16mf4_m(vm, rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16mf2_m(mask, base, bindex, vl);
+vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16mf2_m(vm, rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16m1_m(mask, base, bindex, vl);
+vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16m1_m(vm, rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16m2_m(mask, base, bindex, vl);
+vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16m2_m(vm, rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32mf2_m(mask, base, bindex, vl);
+vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32mf2_m(vm, rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32m1_m(mask, base, bindex, vl);
+vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32m1_m(vm, rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32m2_m(mask, base, bindex, vl);
+vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32m2_m(vm, rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32m4_m(mask, base, bindex, vl);
+vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32m4_m(vm, rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m1_m(mask, base, bindex, vl);
+vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m1_m(vm, rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m2_m(mask, base, bindex, vl);
+vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m2_m(vm, rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m4_m(mask, base, bindex, vl);
+vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m4_m(vm, rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m8_m(mask, base, bindex, vl);
+vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m8_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei64\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vluxei8.c b/auto-generated/gnu-api-tests/vluxei8.c
index 419dd7469..184bb34ff 100644
--- a/auto-generated/gnu-api-tests/vluxei8.c
+++ b/auto-generated/gnu-api-tests/vluxei8.c
@@ -6,476 +6,476 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vluxei8_v_f16mf4(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf4(base, bindex, vl);
+vfloat16mf4_t test_vluxei8_v_f16mf4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf4(rs1, rs2, vl);
 }
-vfloat16mf2_t test_vluxei8_v_f16mf2(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf2(base, bindex, vl);
+vfloat16mf2_t test_vluxei8_v_f16mf2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf2(rs1, rs2, vl);
 }
-vfloat16m1_t test_vluxei8_v_f16m1(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m1(base, bindex, vl);
+vfloat16m1_t test_vluxei8_v_f16m1(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m1(rs1, rs2, vl);
 }
-vfloat16m2_t test_vluxei8_v_f16m2(const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m2(base, bindex, vl);
+vfloat16m2_t test_vluxei8_v_f16m2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m2(rs1, rs2, vl);
 }
-vfloat16m4_t test_vluxei8_v_f16m4(const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m4(base, bindex, vl);
+vfloat16m4_t test_vluxei8_v_f16m4(const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m4(rs1, rs2, vl);
 }
-vfloat16m8_t test_vluxei8_v_f16m8(const float16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m8(base, bindex, vl);
+vfloat16m8_t test_vluxei8_v_f16m8(const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m8(rs1, rs2, vl);
 }
-vfloat32mf2_t test_vluxei8_v_f32mf2(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32mf2(base, bindex, vl);
+vfloat32mf2_t test_vluxei8_v_f32mf2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32mf2(rs1, rs2, vl);
 }
-vfloat32m1_t test_vluxei8_v_f32m1(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m1(base, bindex, vl);
+vfloat32m1_t test_vluxei8_v_f32m1(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m1(rs1, rs2, vl);
 }
-vfloat32m2_t test_vluxei8_v_f32m2(const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m2(base, bindex, vl);
+vfloat32m2_t test_vluxei8_v_f32m2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m2(rs1, rs2, vl);
 }
-vfloat32m4_t test_vluxei8_v_f32m4(const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m4(base, bindex, vl);
+vfloat32m4_t test_vluxei8_v_f32m4(const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m4(rs1, rs2, vl);
 }
-vfloat32m8_t test_vluxei8_v_f32m8(const float32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m8(base, bindex, vl);
+vfloat32m8_t test_vluxei8_v_f32m8(const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m8(rs1, rs2, vl);
 }
-vfloat64m1_t test_vluxei8_v_f64m1(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m1(base, bindex, vl);
+vfloat64m1_t test_vluxei8_v_f64m1(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m1(rs1, rs2, vl);
 }
-vfloat64m2_t test_vluxei8_v_f64m2(const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m2(base, bindex, vl);
+vfloat64m2_t test_vluxei8_v_f64m2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m2(rs1, rs2, vl);
 }
-vfloat64m4_t test_vluxei8_v_f64m4(const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m4(base, bindex, vl);
+vfloat64m4_t test_vluxei8_v_f64m4(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m4(rs1, rs2, vl);
 }
-vfloat64m8_t test_vluxei8_v_f64m8(const float64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m8(base, bindex, vl);
+vfloat64m8_t test_vluxei8_v_f64m8(const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m8(rs1, rs2, vl);
 }
-vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf8(base, bindex, vl);
+vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf8(rs1, rs2, vl);
 }
-vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf4(base, bindex, vl);
+vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf4(rs1, rs2, vl);
 }
-vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf2(base, bindex, vl);
+vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf2(rs1, rs2, vl);
 }
-vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m1(base, bindex, vl);
+vint8m1_t test_vluxei8_v_i8m1(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m1(rs1, rs2, vl);
 }
-vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m2(base, bindex, vl);
+vint8m2_t test_vluxei8_v_i8m2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m2(rs1, rs2, vl);
 }
-vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m4(base, bindex, vl);
+vint8m4_t test_vluxei8_v_i8m4(const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m4(rs1, rs2, vl);
 }
-vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m8(base, bindex, vl);
+vint8m8_t test_vluxei8_v_i8m8(const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m8(rs1, rs2, vl);
 }
-vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16mf4(base, bindex, vl);
+vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16mf4(rs1, rs2, vl);
 }
-vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16mf2(base, bindex, vl);
+vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16mf2(rs1, rs2, vl);
 }
-vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m1(base, bindex, vl);
+vint16m1_t test_vluxei8_v_i16m1(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m1(rs1, rs2, vl);
 }
-vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m2(base, bindex, vl);
+vint16m2_t test_vluxei8_v_i16m2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m2(rs1, rs2, vl);
 }
-vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m4(base, bindex, vl);
+vint16m4_t test_vluxei8_v_i16m4(const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m4(rs1, rs2, vl);
 }
-vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m8(base, bindex, vl);
+vint16m8_t test_vluxei8_v_i16m8(const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m8(rs1, rs2, vl);
 }
-vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32mf2(base, bindex, vl);
+vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32mf2(rs1, rs2, vl);
 }
-vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m1(base, bindex, vl);
+vint32m1_t test_vluxei8_v_i32m1(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m1(rs1, rs2, vl);
 }
-vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m2(base, bindex, vl);
+vint32m2_t test_vluxei8_v_i32m2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m2(rs1, rs2, vl);
 }
-vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m4(base, bindex, vl);
+vint32m4_t test_vluxei8_v_i32m4(const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m4(rs1, rs2, vl);
 }
-vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m8(base, bindex, vl);
+vint32m8_t test_vluxei8_v_i32m8(const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m8(rs1, rs2, vl);
 }
-vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m1(base, bindex, vl);
+vint64m1_t test_vluxei8_v_i64m1(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m1(rs1, rs2, vl);
 }
-vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m2(base, bindex, vl);
+vint64m2_t test_vluxei8_v_i64m2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m2(rs1, rs2, vl);
 }
-vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m4(base, bindex, vl);
+vint64m4_t test_vluxei8_v_i64m4(const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m4(rs1, rs2, vl);
 }
-vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m8(base, bindex, vl);
+vint64m8_t test_vluxei8_v_i64m8(const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m8(rs1, rs2, vl);
 }
-vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf8(base, bindex, vl);
+vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf8(rs1, rs2, vl);
 }
-vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf4(base, bindex, vl);
+vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf4(rs1, rs2, vl);
 }
-vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf2(base, bindex, vl);
+vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf2(rs1, rs2, vl);
 }
-vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m1(base, bindex, vl);
+vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m1(rs1, rs2, vl);
 }
-vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m2(base, bindex, vl);
+vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m2(rs1, rs2, vl);
 }
-vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m4(base, bindex, vl);
+vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m4(rs1, rs2, vl);
 }
-vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m8(base, bindex, vl);
+vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m8(rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf4(base, bindex, vl);
+vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf4(rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf2(base, bindex, vl);
+vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf2(rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m1(base, bindex, vl);
+vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m1(rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m2(base, bindex, vl);
+vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m2(rs1, rs2, vl);
 }
-vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m4(base, bindex, vl);
+vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m4(rs1, rs2, vl);
 }
-vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m8(base, bindex, vl);
+vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m8(rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32mf2(base, bindex, vl);
+vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32mf2(rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m1(base, bindex, vl);
+vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m1(rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m2(base, bindex, vl);
+vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m2(rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m4(base, bindex, vl);
+vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m4(rs1, rs2, vl);
 }
-vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m8(base, bindex, vl);
+vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m8(rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m1(base, bindex, vl);
+vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m1(rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m2(base, bindex, vl);
+vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m2(rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m4(base, bindex, vl);
+vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m4(rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m8(base, bindex, vl);
+vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m8(rs1, rs2, vl);
 }
-vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf4_m(mask, base, bindex, vl);
+vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf4_m(vm, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf2_m(mask, base, bindex, vl);
+vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf2_m(vm, rs1, rs2, vl);
 }
-vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m1_m(mask, base, bindex, vl);
+vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m1_m(vm, rs1, rs2, vl);
 }
-vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m2_m(mask, base, bindex, vl);
+vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m2_m(vm, rs1, rs2, vl);
 }
-vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m4_m(mask, base, bindex, vl);
+vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m4_m(vm, rs1, rs2, vl);
 }
-vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m8_m(mask, base, bindex, vl);
+vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m8_m(vm, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32mf2_m(mask, base, bindex, vl);
+vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32mf2_m(vm, rs1, rs2, vl);
 }
-vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m1_m(mask, base, bindex, vl);
+vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m1_m(vm, rs1, rs2, vl);
 }
-vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m2_m(mask, base, bindex, vl);
+vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m2_m(vm, rs1, rs2, vl);
 }
-vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m4_m(mask, base, bindex, vl);
+vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m4_m(vm, rs1, rs2, vl);
 }
-vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m8_m(mask, base, bindex, vl);
+vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m8_m(vm, rs1, rs2, vl);
 }
-vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m1_m(mask, base, bindex, vl);
+vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m1_m(vm, rs1, rs2, vl);
 }
-vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m2_m(mask, base, bindex, vl);
+vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m2_m(vm, rs1, rs2, vl);
 }
-vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m4_m(mask, base, bindex, vl);
+vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m4_m(vm, rs1, rs2, vl);
 }
-vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m8_m(mask, base, bindex, vl);
+vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m8_m(vm, rs1, rs2, vl);
 }
-vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf8_m(mask, base, bindex, vl);
+vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf8_m(vm, rs1, rs2, vl);
 }
-vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf4_m(mask, base, bindex, vl);
+vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf4_m(vm, rs1, rs2, vl);
 }
-vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf2_m(mask, base, bindex, vl);
+vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf2_m(vm, rs1, rs2, vl);
 }
-vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m1_m(mask, base, bindex, vl);
+vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m1_m(vm, rs1, rs2, vl);
 }
-vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m2_m(mask, base, bindex, vl);
+vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m2_m(vm, rs1, rs2, vl);
 }
-vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m4_m(mask, base, bindex, vl);
+vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m4_m(vm, rs1, rs2, vl);
 }
-vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m8_m(mask, base, bindex, vl);
+vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m8_m(vm, rs1, rs2, vl);
 }
-vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16mf4_m(mask, base, bindex, vl);
+vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16mf4_m(vm, rs1, rs2, vl);
 }
-vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16mf2_m(mask, base, bindex, vl);
+vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16mf2_m(vm, rs1, rs2, vl);
 }
-vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m1_m(mask, base, bindex, vl);
+vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m1_m(vm, rs1, rs2, vl);
 }
-vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m2_m(mask, base, bindex, vl);
+vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m2_m(vm, rs1, rs2, vl);
 }
-vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m4_m(mask, base, bindex, vl);
+vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m4_m(vm, rs1, rs2, vl);
 }
-vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m8_m(mask, base, bindex, vl);
+vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m8_m(vm, rs1, rs2, vl);
 }
-vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32mf2_m(mask, base, bindex, vl);
+vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32mf2_m(vm, rs1, rs2, vl);
 }
-vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m1_m(mask, base, bindex, vl);
+vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m1_m(vm, rs1, rs2, vl);
 }
-vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m2_m(mask, base, bindex, vl);
+vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m2_m(vm, rs1, rs2, vl);
 }
-vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m4_m(mask, base, bindex, vl);
+vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m4_m(vm, rs1, rs2, vl);
 }
-vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m8_m(mask, base, bindex, vl);
+vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m8_m(vm, rs1, rs2, vl);
 }
-vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m1_m(mask, base, bindex, vl);
+vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m1_m(vm, rs1, rs2, vl);
 }
-vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m2_m(mask, base, bindex, vl);
+vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m2_m(vm, rs1, rs2, vl);
 }
-vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m4_m(mask, base, bindex, vl);
+vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m4_m(vm, rs1, rs2, vl);
 }
-vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m8_m(mask, base, bindex, vl);
+vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m8_m(vm, rs1, rs2, vl);
 }
-vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf8_m(mask, base, bindex, vl);
+vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf8_m(vm, rs1, rs2, vl);
 }
-vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf4_m(mask, base, bindex, vl);
+vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf4_m(vm, rs1, rs2, vl);
 }
-vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf2_m(mask, base, bindex, vl);
+vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf2_m(vm, rs1, rs2, vl);
 }
-vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m1_m(mask, base, bindex, vl);
+vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m1_m(vm, rs1, rs2, vl);
 }
-vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m2_m(mask, base, bindex, vl);
+vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m2_m(vm, rs1, rs2, vl);
 }
-vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m4_m(mask, base, bindex, vl);
+vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m4_m(vm, rs1, rs2, vl);
 }
-vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m8_m(mask, base, bindex, vl);
+vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m8_m(vm, rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf4_m(mask, base, bindex, vl);
+vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf4_m(vm, rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf2_m(mask, base, bindex, vl);
+vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf2_m(vm, rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m1_m(mask, base, bindex, vl);
+vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m1_m(vm, rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m2_m(mask, base, bindex, vl);
+vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m2_m(vm, rs1, rs2, vl);
 }
-vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m4_m(mask, base, bindex, vl);
+vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m4_m(vm, rs1, rs2, vl);
 }
-vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m8_m(mask, base, bindex, vl);
+vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m8_m(vm, rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32mf2_m(mask, base, bindex, vl);
+vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32mf2_m(vm, rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m1_m(mask, base, bindex, vl);
+vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m1_m(vm, rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m2_m(mask, base, bindex, vl);
+vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m2_m(vm, rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m4_m(mask, base, bindex, vl);
+vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m4_m(vm, rs1, rs2, vl);
 }
-vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m8_m(mask, base, bindex, vl);
+vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m8_m(vm, rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m1_m(mask, base, bindex, vl);
+vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m1_m(vm, rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m2_m(mask, base, bindex, vl);
+vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m2_m(vm, rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m4_m(mask, base, bindex, vl);
+vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m4_m(vm, rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m8_m(mask, base, bindex, vl);
+vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m8_m(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei8\.[ivxfswum.]+\s+} 118 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg2ei16.c b/auto-generated/gnu-api-tests/vluxseg2ei16.c
index c0aceb392..8b3255b65 100644
--- a/auto-generated/gnu-api-tests/vluxseg2ei16.c
+++ b/auto-generated/gnu-api-tests/vluxseg2ei16.c
@@ -6,388 +6,388 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf4x2(base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf4x2(rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf2x2(base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf2x2(rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m1x2(base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m1x2(rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m2x2(base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m2x2(rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m4x2(base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m4x2(rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32mf2x2(base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32mf2x2(rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m1x2(base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m1x2(rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m2x2(base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m2x2(rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m4x2(base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m4x2(rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m1x2(base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m1x2(rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m2x2(base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m2x2(rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m4x2(base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m4x2(rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf8x2(base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf8x2(rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf4x2(base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf4x2(rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf2x2(base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf2x2(rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m1x2(base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m1x2(rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m2x2(base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m2x2(rs1, rs2, vl);
 }
-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m4x2(base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m4x2(rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16mf4x2(base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16mf4x2(rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16mf2x2(base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16mf2x2(rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m1x2(base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m1x2(rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m2x2(base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m2x2(rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m4x2(base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m4x2(rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32mf2x2(base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32mf2x2(rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m1x2(base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m1x2(rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m2x2(base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m2x2(rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m4x2(base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m4x2(rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i64m1x2(base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i64m1x2(rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i64m2x2(base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i64m2x2(rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i64m4x2(base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i64m4x2(rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8mf8x2(base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8mf8x2(rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8mf4x2(base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8mf4x2(rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8mf2x2(base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8mf2x2(rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8m1x2(base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8m1x2(rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8m2x2(base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8m2x2(rs1, rs2, vl);
 }
-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8m4x2(base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8m4x2(rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16mf4x2(base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16mf4x2(rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16mf2x2(base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16mf2x2(rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16m1x2(base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16m1x2(rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16m2x2(base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16m2x2(rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16m4x2(base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16m4x2(rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32mf2x2(base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32mf2x2(rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32m1x2(base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32m1x2(rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32m2x2(base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32m2x2(rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32m4x2(base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32m4x2(rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u64m1x2(base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u64m1x2(rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u64m2x2(base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u64m2x2(rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u64m4x2(base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u64m4x2(rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf4x2_m(mask, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf4x2_m(vm, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf2x2_m(mask, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf2x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m1x2_m(mask, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m2x2_m(mask, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m4x2_m(mask, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m4x2_m(vm, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32mf2x2_m(mask, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32mf2x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m1x2_m(mask, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m2x2_m(mask, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m4x2_m(mask, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m4x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m1x2_m(mask, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m1x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m2x2_m(mask, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m2x2_m(vm, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m4x2_m(mask, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m4x2_m(vm, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf8x2_m(mask, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf8x2_m(vm, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf4x2_m(mask, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf4x2_m(vm, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf2x2_m(mask, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf2x2_m(vm, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m1x2_m(mask, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m1x2_m(vm, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m2x2_m(mask, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei16_v_i8m2x2_m(vm, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8m4x2_m(mask, base, bindex, vl); +vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8m4x2_m(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16mf4x2_m(mask, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16mf4x2_m(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16mf2x2_m(mask, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16mf2x2_m(vm, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m1x2_m(mask, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m1x2_m(vm, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m2x2_m(mask, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m2x2_m(vm, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m4x2_m(mask, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m4x2_m(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32mf2x2_m(mask, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32mf2x2_m(vm, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m1x2_m(mask, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m1x2_m(vm, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m2x2_m(mask, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m2x2_m(vm, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m4x2_m(mask, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei16_v_i32m4x2_m(vm, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m1x2_m(mask, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m1x2_m(vm, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m2x2_m(mask, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m2x2_m(vm, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m4x2_m(mask, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m4x2_m(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf8x2_m(mask, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf8x2_m(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf4x2_m(mask, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf4x2_m(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf2x2_m(mask, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf2x2_m(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m1x2_m(mask, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m1x2_m(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m2x2_m(mask, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m2x2_m(vm, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m4x2_m(mask, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m4x2_m(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf4x2_m(mask, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei16_v_u16mf4x2_m(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf2x2_m(mask, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16mf2x2_m(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m1x2_m(mask, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m1x2_m(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m2x2_m(mask, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m2x2_m(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m4x2_m(mask, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m4x2_m(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32mf2x2_m(mask, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32mf2x2_m(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m1x2_m(mask, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m1x2_m(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m2x2_m(mask, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m2x2_m(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m4x2_m(mask, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m4x2_m(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m1x2_m(mask, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m1x2_m(vm, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m2x2_m(mask, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, 
size_t vl) { + return __riscv_vluxseg2ei16_v_u64m2x2_m(vm, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m4x2_m(mask, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m4x2_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei16\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg2ei32.c b/auto-generated/gnu-api-tests/vluxseg2ei32.c index 8ad93963e..d83117d01 100644 --- a/auto-generated/gnu-api-tests/vluxseg2ei32.c +++ b/auto-generated/gnu-api-tests/vluxseg2ei32.c @@ -6,372 +6,372 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16mf4x2(base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16mf4x2(rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16mf2x2(base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16mf2x2(rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m1x2(base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m1x2(rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m2x2(base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m2x2(rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m4x2(base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m4x2(rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f32mf2x2(base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32mf2x2(rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f32m1x2(base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32m1x2(rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f32m2x2(base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32m2x2(rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float32_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei32_v_f32m4x2(base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32m4x2(rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f64m1x2(base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f64m1x2(rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f64m2x2(base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f64m2x2(rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f64m4x2(base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f64m4x2(rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8mf8x2(base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8mf8x2(rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8mf4x2(base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8mf4x2(rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8mf2x2(base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8mf2x2(rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8m1x2(base, bindex, vl); +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8m1x2(rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8m2x2(base, bindex, vl); +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8m2x2(rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16mf4x2(base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16mf4x2(rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16mf2x2(base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16mf2x2(rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16m1x2(base, bindex, vl); +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16m1x2(rs1, 
rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16m2x2(base, bindex, vl); +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16m2x2(rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16m4x2(base, bindex, vl); +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16m4x2(rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32mf2x2(base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32mf2x2(rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32m1x2(base, bindex, vl); +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32m1x2(rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32m2x2(base, bindex, vl); +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32m2x2(rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32m4x2(base, bindex, vl); +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32m4x2(rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i64m1x2(base, bindex, vl); +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i64m1x2(rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i64m2x2(base, bindex, vl); +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i64m2x2(rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i64m4x2(base, bindex, vl); +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i64m4x2(rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8mf8x2(base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8mf8x2(rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8mf4x2(base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8mf4x2(rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8mf2x2(base, bindex, vl); +vuint8mf2x2_t 
test_vluxseg2ei32_v_u8mf2x2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8mf2x2(rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8m1x2(base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8m1x2(rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8m2x2(base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8m2x2(rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16mf4x2(base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16mf4x2(rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16mf2x2(base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16mf2x2(rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16m1x2(base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16m1x2(rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16m2x2(base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16m2x2(rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16m4x2(base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16m4x2(rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32mf2x2(base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32mf2x2(rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32m1x2(base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32m1x2(rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32m2x2(base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32m2x2(rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32m4x2(base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32m4x2(rs1, rs2, vl); } -vuint64m1x2_t 
test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m1x2(base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m1x2(rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m2x2(base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m2x2(rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m4x2(base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m4x2(rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16mf4x2_m(mask, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16mf4x2_m(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16mf2x2_m(mask, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16mf2x2_m(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m1x2_m(mask, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m1x2_m(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m2x2_m(mask, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m2x2_m(vm, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m4x2_m(mask, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m4x2_m(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f32mf2x2_m(mask, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32mf2x2_m(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f32m1x2_m(mask, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32m1x2_m(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, 
size_t vl) { - return __riscv_vluxseg2ei32_v_f32m2x2_m(mask, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32m2x2_m(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f32m4x2_m(mask, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f32m4x2_m(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f64m1x2_m(mask, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f64m1x2_m(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f64m2x2_m(mask, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f64m2x2_m(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f64m4x2_m(mask, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f64m4x2_m(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8mf8x2_m(mask, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8mf8x2_m(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8mf4x2_m(mask, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8mf4x2_m(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8mf2x2_m(mask, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8mf2x2_m(vm, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8m1x2_m(mask, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8m1x2_m(vm, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i8m2x2_m(mask, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i8m2x2_m(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vluxseg2ei32_v_i16mf4x2_m(mask, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16mf4x2_m(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16mf2x2_m(mask, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16mf2x2_m(vm, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16m1x2_m(mask, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16m1x2_m(vm, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16m2x2_m(mask, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16m2x2_m(vm, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i16m4x2_m(mask, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i16m4x2_m(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32mf2x2_m(mask, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32mf2x2_m(vm, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32m1x2_m(mask, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32m1x2_m(vm, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32m2x2_m(mask, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32m2x2_m(vm, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i32m4x2_m(mask, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i32m4x2_m(vm, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i64m1x2_m(mask, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i64m1x2_m(vm, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei32_v_i64m2x2_m(mask, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i64m2x2_m(vm, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_i64m4x2_m(mask, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_i64m4x2_m(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8mf8x2_m(mask, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8mf8x2_m(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8mf4x2_m(mask, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8mf4x2_m(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8mf2x2_m(mask, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8mf2x2_m(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8m1x2_m(mask, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8m1x2_m(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u8m2x2_m(mask, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u8m2x2_m(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16mf4x2_m(mask, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16mf4x2_m(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16mf2x2_m(mask, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16mf2x2_m(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16m1x2_m(mask, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16m1x2_m(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei32_v_u16m2x2_m(mask, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16m2x2_m(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u16m4x2_m(mask, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u16m4x2_m(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32mf2x2_m(mask, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32mf2x2_m(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32m1x2_m(mask, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32m1x2_m(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32m2x2_m(mask, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32m2x2_m(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u32m4x2_m(mask, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32m4x2_m(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m1x2_m(mask, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m1x2_m(vm, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m2x2_m(mask, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m2x2_m(vm, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m4x2_m(mask, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m4x2_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei32\.[ivxfswum.]+\s+} 92 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg2ei64.c b/auto-generated/gnu-api-tests/vluxseg2ei64.c index c234ee72c..5b74eea34 100644 --- a/auto-generated/gnu-api-tests/vluxseg2ei64.c +++ b/auto-generated/gnu-api-tests/vluxseg2ei64.c @@ -6,332 +6,332 @@ typedef _Float16 float16_t; typedef float float32_t; 
typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf4x2(base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf4x2(rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf2x2(base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf2x2(rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m1x2(base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m1x2(rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m2x2(base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m2x2(rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32mf2x2(base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32mf2x2(rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m1x2(base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m1x2(rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m2x2(base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m2x2(rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m4x2(base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m4x2(rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m1x2(base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m1x2(rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m2x2(base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m2x2(rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m4x2(base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m4x2(rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t 
bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf8x2(base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf8x2(rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf4x2(base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf4x2(rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf2x2(base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf2x2(rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8m1x2(base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8m1x2(rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16mf4x2(base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf4x2(rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16mf2x2(base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf2x2(rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m1x2(base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m1x2(rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m2x2(base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m2x2(rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32mf2x2(base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32mf2x2(rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m1x2(base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m1x2(rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m2x2(base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m2x2(rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m4x2(base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei64_v_i32m4x2(rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i64m1x2(base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i64m1x2(rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i64m2x2(base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i64m2x2(rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i64m4x2(base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i64m4x2(rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8mf8x2(base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8mf8x2(rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8mf4x2(base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8mf4x2(rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8mf2x2(base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8mf2x2(rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8m1x2(base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8m1x2(rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16mf4x2(base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16mf4x2(rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16mf2x2(base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16mf2x2(rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16m1x2(base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16m1x2(rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16m2x2(base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16m2x2(rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32mf2x2(base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32mf2x2(rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32m1x2(base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32m1x2(rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32m2x2(base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32m2x2(rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32m4x2(base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32m4x2(rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u64m1x2(base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u64m1x2(rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u64m2x2(base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u64m2x2(rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u64m4x2(base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u64m4x2(rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f16mf4x2_m(mask, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f16mf4x2_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f16mf2x2_m(mask, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f16mf2x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f16m1x2_m(mask, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f16m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f16m2x2_m(mask, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f16m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f32mf2x2_m(mask, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f32mf2x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f32m1x2_m(mask, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f32m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f32m2x2_m(mask, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f32m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f32m4x2_m(mask, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f32m4x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f64m1x2_m(mask, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f64m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f64m2x2_m(mask, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f64m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_f64m4x2_m(mask, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_f64m4x2_m(vm, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i8mf8x2_m(mask, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i8mf8x2_m(vm, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i8mf4x2_m(mask, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i8mf4x2_m(vm, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i8mf2x2_m(mask, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i8mf2x2_m(vm, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i8m1x2_m(mask, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i8m1x2_m(vm, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i16mf4x2_m(mask, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i16mf4x2_m(vm, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i16mf2x2_m(mask, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i16mf2x2_m(vm, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i16m1x2_m(mask, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i16m1x2_m(vm, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i16m2x2_m(mask, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i16m2x2_m(vm, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i32mf2x2_m(mask, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i32mf2x2_m(vm, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i32m1x2_m(mask, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i32m1x2_m(vm, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i32m2x2_m(mask, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i32m2x2_m(vm, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i32m4x2_m(mask, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i32m4x2_m(vm, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i64m1x2_m(mask, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i64m1x2_m(vm, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i64m2x2_m(mask, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i64m2x2_m(vm, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_i64m4x2_m(mask, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_i64m4x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8mf8x2_m(mask, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8mf8x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8mf4x2_m(mask, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8mf4x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8mf2x2_m(mask, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u8m1x2_m(mask, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u8m1x2_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16mf4x2_m(mask, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16mf4x2_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16mf2x2_m(mask, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16m1x2_m(mask, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16m1x2_m(vm, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u16m2x2_m(mask, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u16m2x2_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32mf2x2_m(mask, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32m1x2_m(mask, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32m1x2_m(vm, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32m2x2_m(mask, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32m2x2_m(vm, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u32m4x2_m(mask, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u32m4x2_m(vm, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u64m1x2_m(mask, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u64m1x2_m(vm, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u64m2x2_m(mask, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u64m2x2_m(vm, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_v_u64m4x2_m(mask, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_v_u64m4x2_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei64\.[ivxfswum.]+\s+} 82 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg2ei8.c b/auto-generated/gnu-api-tests/vluxseg2ei8.c
index e6b6d55a9..703978253 100644
--- a/auto-generated/gnu-api-tests/vluxseg2ei8.c
+++ b/auto-generated/gnu-api-tests/vluxseg2ei8.c
@@ -6,388 +6,388 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16mf4x2(base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16mf4x2(rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16mf2x2(base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16mf2x2(rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16m1x2(base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16m1x2(rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16m2x2(base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16m2x2(rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16m4x2(base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16m4x2(rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32mf2x2(base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32mf2x2(rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32m1x2(base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32m1x2(rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32m2x2(base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32m2x2(rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32m4x2(base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32m4x2(rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f64m1x2(base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f64m1x2(rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f64m2x2(base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f64m2x2(rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f64m4x2(base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f64m4x2(rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8mf8x2(base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8mf8x2(rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8mf4x2(base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8mf4x2(rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8mf2x2(base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8mf2x2(rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8m1x2(base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8m1x2(rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8m2x2(base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8m2x2(rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8m4x2(base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8m4x2(rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16mf4x2(base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16mf4x2(rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16mf2x2(base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16mf2x2(rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16m1x2(base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16m1x2(rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16m2x2(base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16m2x2(rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16m4x2(base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16m4x2(rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32mf2x2(base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32mf2x2(rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32m1x2(base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32m1x2(rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32m2x2(base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32m2x2(rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32m4x2(base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32m4x2(rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i64m1x2(base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i64m1x2(rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i64m2x2(base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i64m2x2(rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i64m4x2(base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i64m4x2(rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8mf8x2(base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8mf8x2(rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8mf4x2(base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8mf4x2(rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8mf2x2(base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8mf2x2(rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8m1x2(base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8m1x2(rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8m2x2(base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8m2x2(rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8m4x2(base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8m4x2(rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16mf4x2(base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16mf4x2(rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16mf2x2(base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16mf2x2(rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16m1x2(base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16m1x2(rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16m2x2(base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16m2x2(rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16m4x2(base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16m4x2(rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32mf2x2(base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32mf2x2(rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32m1x2(base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32m1x2(rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32m2x2(base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32m2x2(rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32m4x2(base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32m4x2(rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u64m1x2(base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u64m1x2(rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u64m2x2(base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u64m2x2(rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u64m4x2(base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u64m4x2(rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16mf4x2_m(mask, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16mf4x2_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16mf2x2_m(mask, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16mf2x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16m1x2_m(mask, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16m2x2_m(mask, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f16m4x2_m(mask, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f16m4x2_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32mf2x2_m(mask, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32mf2x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32m1x2_m(mask, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32m2x2_m(mask, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f32m4x2_m(mask, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f32m4x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f64m1x2_m(mask, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f64m1x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f64m2x2_m(mask, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f64m2x2_m(vm, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_f64m4x2_m(mask, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_f64m4x2_m(vm, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8mf8x2_m(mask, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8mf8x2_m(vm, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8mf4x2_m(mask, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8mf4x2_m(vm, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8mf2x2_m(mask, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8mf2x2_m(vm, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8m1x2_m(mask, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8m1x2_m(vm, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8m2x2_m(mask, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8m2x2_m(vm, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i8m4x2_m(mask, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i8m4x2_m(vm, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16mf4x2_m(mask, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16mf4x2_m(vm, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16mf2x2_m(mask, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16mf2x2_m(vm, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16m1x2_m(mask, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16m1x2_m(vm, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16m2x2_m(mask, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16m2x2_m(vm, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i16m4x2_m(mask, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i16m4x2_m(vm, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32mf2x2_m(mask, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32mf2x2_m(vm, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32m1x2_m(mask, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32m1x2_m(vm, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32m2x2_m(mask, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32m2x2_m(vm, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i32m4x2_m(mask, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i32m4x2_m(vm, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i64m1x2_m(mask, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i64m1x2_m(vm, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i64m2x2_m(mask, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i64m2x2_m(vm, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_i64m4x2_m(mask, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_i64m4x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8mf8x2_m(mask, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8mf8x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8mf4x2_m(mask, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8mf4x2_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8mf2x2_m(mask, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8m1x2_m(mask, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8m1x2_m(vm, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8m2x2_m(mask, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8m2x2_m(vm, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u8m4x2_m(mask, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u8m4x2_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16mf4x2_m(mask, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16mf4x2_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16mf2x2_m(mask, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16m1x2_m(mask, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16m1x2_m(vm, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16m2x2_m(mask, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16m2x2_m(vm, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u16m4x2_m(mask, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u16m4x2_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32mf2x2_m(mask, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32mf2x2_m(vm, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32m1x2_m(mask, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32m1x2_m(vm, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32m2x2_m(mask, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32m2x2_m(vm, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u32m4x2_m(mask, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u32m4x2_m(vm, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u64m1x2_m(mask, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u64m1x2_m(vm, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u64m2x2_m(mask, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u64m2x2_m(vm, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_v_u64m4x2_m(mask, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_v_u64m4x2_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei8\.[ivxfswum.]+\s+} 96 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg3ei16.c b/auto-generated/gnu-api-tests/vluxseg3ei16.c
index c1b9f2e36..74d9c3d77 100644
--- a/auto-generated/gnu-api-tests/vluxseg3ei16.c
+++ b/auto-generated/gnu-api-tests/vluxseg3ei16.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf4x3(base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf4x3(rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf2x3(base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf2x3(rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m1x3(base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m1x3(rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m2x3(base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m2x3(rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32mf2x3(base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32mf2x3(rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m1x3(base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m1x3(rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m2x3(base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m2x3(rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m1x3(base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m1x3(rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m2x3(base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m2x3(rs1, rs2, vl);
 }

-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf8x3(base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf8x3(rs1, rs2, vl);
 }

-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf4x3(base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf4x3(rs1, rs2, vl);
 }

-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf2x3(base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf2x3(rs1, rs2, vl);
 }

-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m1x3(base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m1x3(rs1, rs2, vl);
 }

-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m2x3(base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m2x3(rs1, rs2, vl);
 }

-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf4x3(base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf4x3(rs1, rs2, vl);
 }

-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf2x3(base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf2x3(rs1, rs2, vl);
 }

-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m1x3(base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m1x3(rs1, rs2, vl);
 }

-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m2x3(base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m2x3(rs1, rs2, vl);
 }

-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32mf2x3(base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32mf2x3(rs1, rs2, vl);
 }

-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m1x3(base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m1x3(rs1, rs2, vl);
 }

-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m2x3(base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m2x3(rs1, rs2, vl);
 }

-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m1x3(base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m1x3(rs1, rs2, vl);
 }

-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m2x3(base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m2x3(rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf8x3(base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf8x3(rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf4x3(base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf4x3(rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf2x3(base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf2x3(rs1, rs2, vl);
 }

-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m1x3(base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m1x3(rs1, rs2, vl);
 }

-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m2x3(base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m2x3(rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf4x3(base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf4x3(rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf2x3(base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf2x3(rs1, rs2, vl);
 }

-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m1x3(base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m1x3(rs1, rs2, vl);
 }

-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m2x3(base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m2x3(rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32mf2x3(base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32mf2x3(rs1, rs2, vl);
 }

-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m1x3(base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m1x3(rs1, rs2, vl);
 }

-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m2x3(base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m2x3(rs1, rs2, vl);
 }

-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m1x3(base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m1x3(rs1, rs2, vl);
 }

-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m2x3(base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m2x3(rs1, rs2, vl);
 }
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m2x3(rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf4x3_m(mask, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf4x3_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf2x3_m(mask, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m1x3_m(mask, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m2x3_m(mask, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m2x3_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32mf2x3_m(mask, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m1x3_m(mask, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m2x3_m(mask, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m2x3_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m1x3_m(mask, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m2x3_m(mask, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m2x3_m(vm, rs1, rs2, vl);
 }

-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf8x3_m(mask, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf8x3_m(vm, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf4x3_m(mask, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf4x3_m(vm, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf2x3_m(mask, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf2x3_m(vm, rs1, rs2, vl);
 }

-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m1x3_m(mask, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m1x3_m(vm, rs1, rs2, vl);
 }

-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m2x3_m(mask, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m2x3_m(vm, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf4x3_m(mask, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf4x3_m(vm, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf2x3_m(mask, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf2x3_m(vm, rs1, rs2, vl);
 }

-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m1x3_m(mask, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m1x3_m(vm, rs1, rs2, vl);
 }

-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m2x3_m(mask, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m2x3_m(vm, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32mf2x3_m(mask, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32mf2x3_m(vm, rs1, rs2, vl);
 }

-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m1x3_m(mask, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m1x3_m(vm, rs1, rs2, vl);
 }

-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m2x3_m(mask, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m2x3_m(vm, rs1, rs2, vl);
 }

-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m1x3_m(mask, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m1x3_m(vm, rs1, rs2, vl);
 }

-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m2x3_m(mask, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m2x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf8x3_m(mask, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf8x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf4x3_m(mask, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf2x3_m(mask, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m1x3_m(mask, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m1x3_m(vm, rs1, rs2, vl);
 }

-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m2x3_m(mask, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m2x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf4x3_m(mask, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf2x3_m(mask, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m1x3_m(mask, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m1x3_m(vm, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m2x3_m(mask, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m2x3_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32mf2x3_m(mask, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m1x3_m(mask, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m1x3_m(vm, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m2x3_m(mask, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m2x3_m(vm, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m1x3_m(mask, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m1x3_m(vm, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m2x3_m(mask, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m2x3_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei16\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg3ei32.c b/auto-generated/gnu-api-tests/vluxseg3ei32.c
index 96d9e3ddf..328d845d2 100644
--- a/auto-generated/gnu-api-tests/vluxseg3ei32.c
+++ b/auto-generated/gnu-api-tests/vluxseg3ei32.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf4x3(base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf4x3(rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf2x3(base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf2x3(rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m1x3(base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m1x3(rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m2x3(base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m2x3(rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32mf2x3(base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32mf2x3(rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m1x3(base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m1x3(rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m2x3(base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m2x3(rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m1x3(base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m1x3(rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m2x3(base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m2x3(rs1, rs2, vl);
 }

-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf8x3(base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf8x3(rs1, rs2, vl);
 }

-vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf4x3(base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf4x3(rs1, rs2, vl);
 }

-vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf2x3(base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf2x3(rs1, rs2, vl);
 }

-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m1x3(base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m1x3(rs1, rs2, vl);
 }

-vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m2x3(base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m2x3(rs1, rs2, vl);
 }

-vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf4x3(base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf4x3(rs1, rs2, vl);
 }

-vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf2x3(base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf2x3(rs1, rs2, vl);
 }

-vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m1x3(base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m1x3(rs1, rs2, vl);
 }

-vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m2x3(base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m2x3(rs1, rs2, vl);
 }

-vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32mf2x3(base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32mf2x3(rs1, rs2, vl);
 }

-vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m1x3(base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m1x3(rs1, rs2, vl);
 }

-vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m2x3(base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m2x3(rs1, rs2, vl);
 }

-vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m1x3(base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m1x3(rs1, rs2, vl);
 }

-vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m2x3(base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m2x3(rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf8x3(base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf8x3(rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf4x3(base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf4x3(rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf2x3(base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf2x3(rs1, rs2, vl);
 }

-vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m1x3(base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m1x3(rs1, rs2, vl);
 }

-vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m2x3(base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m2x3(rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf4x3(base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf4x3(rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf2x3(base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf2x3(rs1, rs2, vl);
 }

-vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m1x3(base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m1x3(rs1, rs2, vl);
 }

-vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m2x3(base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m2x3(rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32mf2x3(base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32mf2x3(rs1, rs2, vl);
 }

-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m1x3(base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m1x3(rs1, rs2, vl);
 }

-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m2x3(base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m2x3(rs1, rs2, vl);
 }

-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m1x3(base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m1x3(rs1, rs2, vl);
 }

-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m2x3(base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m2x3(rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf4x3_m(mask, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf4x3_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf2x3_m(mask, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m1x3_m(mask, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m2x3_m(mask, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m2x3_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32mf2x3_m(mask, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m1x3_m(mask, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m2x3_m(mask, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m2x3_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m1x3_m(mask, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m2x3_m(mask, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m2x3_m(vm, rs1, rs2, vl);
 }

-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf8x3_m(mask, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf8x3_m(vm, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf4x3_m(mask, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf4x3_m(vm, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf2x3_m(mask, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf2x3_m(vm, rs1, rs2, vl);
 }

-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m1x3_m(mask, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m1x3_m(vm, rs1, rs2, vl);
 }

-vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m2x3_m(mask, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m2x3_m(vm, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf4x3_m(mask, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf4x3_m(vm, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf2x3_m(mask, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf2x3_m(vm, rs1, rs2, vl);
 }

-vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m1x3_m(mask, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m1x3_m(vm, rs1, rs2, vl);
 }

-vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m2x3_m(mask, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m2x3_m(vm, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32mf2x3_m(mask, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32mf2x3_m(vm, rs1, rs2, vl);
 }

-vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m1x3_m(mask, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m1x3_m(vm, rs1, rs2, vl);
 }

-vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m2x3_m(mask, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m2x3_m(vm, rs1, rs2, vl);
 }

-vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m1x3_m(mask, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m1x3_m(vm, rs1, rs2, vl);
 }

-vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m2x3_m(mask, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m2x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf8x3_m(mask, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf8x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf4x3_m(mask, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf2x3_m(mask, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m1x3_m(mask, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m1x3_m(vm, rs1, rs2, vl);
 }

-vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m2x3_m(mask, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m2x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf4x3_m(mask, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf2x3_m(mask, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m1x3_m(mask, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m1x3_m(vm, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m2x3_m(mask, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m2x3_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32mf2x3_m(mask, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m1x3_m(mask, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m1x3_m(vm, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m2x3_m(mask, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m2x3_m(vm, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m1x3_m(mask, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m1x3_m(vm, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m2x3_m(mask, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m2x3_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei32\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg3ei64.c b/auto-generated/gnu-api-tests/vluxseg3ei64.c
index ec0fa738d..abdc6daad 100644
--- a/auto-generated/gnu-api-tests/vluxseg3ei64.c
+++ b/auto-generated/gnu-api-tests/vluxseg3ei64.c
@@ -6,284 +6,284 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf4x3(base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf4x3(rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf2x3(base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf2x3(rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m1x3(base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m1x3(rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m2x3(base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m2x3(rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32mf2x3(base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32mf2x3(rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m1x3(base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m1x3(rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m2x3(base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m2x3(rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m1x3(base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m1x3(rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m2x3(base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m2x3(rs1, rs2, vl);
 }

-vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf8x3(base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf8x3(rs1, rs2, vl);
 }

-vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf4x3(base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf4x3(rs1, rs2, vl);
 }

-vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf2x3(base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf2x3(rs1, rs2, vl);
 }

-vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8m1x3(base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8m1x3(rs1, rs2, vl);
 }

-vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf4x3(base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf4x3(rs1, rs2, vl);
 }

-vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf2x3(base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf2x3(rs1, rs2, vl);
 }

-vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m1x3(base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m1x3(rs1, rs2, vl);
 }

-vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m2x3(base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m2x3(rs1, rs2, vl);
 }

-vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32mf2x3(base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32mf2x3(rs1, rs2, vl);
 }

-vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m1x3(base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32m1x3(rs1, rs2, vl);
 }

-vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m2x3(base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32m2x3(rs1, rs2, vl);
 }

-vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i64m1x3(base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i64m1x3(rs1, rs2, vl);
 }

-vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i64m2x3(base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i64m2x3(rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf8x3(base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf8x3(rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf4x3(base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf4x3(rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf2x3(base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf2x3(rs1, rs2, vl);
 }

-vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8m1x3(base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8m1x3(rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16mf4x3(base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16mf4x3(rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16mf2x3(base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16mf2x3(rs1, rs2, vl);
 }

-vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16m1x3(base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16m1x3(rs1, rs2, vl);
 }

-vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16m2x3(base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16m2x3(rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32mf2x3(base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32mf2x3(rs1, rs2, vl);
 }

-vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32m1x3(base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32m1x3(rs1, rs2, vl);
 }

-vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32m2x3(base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32m2x3(rs1, rs2, vl);
 }

-vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u64m1x3(base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u64m1x3(rs1, rs2, vl);
 }

-vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u64m2x3(base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u64m2x3(rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf4x3_m(mask, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf4x3_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf2x3_m(mask, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m1x3_m(mask, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m2x3_m(mask, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m2x3_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32mf2x3_m(mask, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32mf2x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m1x3_m(mask, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m2x3_m(mask, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m2x3_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m1x3_m(mask, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m1x3_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m2x3_m(mask, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m2x3_m(vm, rs1, rs2, vl);
 }

-vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf8x3_m(mask, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf8x3_m(vm, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf4x3_m(mask, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf4x3_m(vm, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf2x3_m(mask, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf2x3_m(vm, rs1, rs2, vl);
 }

-vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8m1x3_m(mask, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8m1x3_m(vm, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf4x3_m(mask, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf4x3_m(vm, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf2x3_m(mask, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf2x3_m(vm, rs1, rs2, vl);
 }

-vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m1x3_m(mask, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m1x3_m(vm, rs1, rs2, vl);
 }

-vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m2x3_m(mask, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m2x3_m(vm, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32mf2x3_m(mask, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32mf2x3_m(vm, rs1, rs2, vl);
 }

-vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m1x3_m(mask, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32m1x3_m(vm, rs1, rs2, vl);
 }

-vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m2x3_m(mask, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32m2x3_m(vm, rs1, rs2, vl);
 }

-vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i64m1x3_m(mask, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i64m1x3_m(vm, rs1, rs2, vl);
 }

-vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i64m2x3_m(mask, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i64m2x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf8x3_m(mask, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf8x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf4x3_m(mask, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf2x3_m(mask, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8m1x3_m(mask, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8m1x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16mf4x3_m(mask, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16mf4x3_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16mf2x3_m(mask, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16m1x3_m(mask, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16m1x3_m(vm, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16m2x3_m(mask, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16m2x3_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32mf2x3_m(mask, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32mf2x3_m(vm, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32m1x3_m(mask, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32m1x3_m(vm, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32m2x3_m(mask, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32m2x3_m(vm, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u64m1x3_m(mask, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u64m1x3_m(vm, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u64m2x3_m(mask, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u64m2x3_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei64\.[ivxfswum.]+\s+} 70 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg3ei8.c b/auto-generated/gnu-api-tests/vluxseg3ei8.c
index bea3e27f1..8eed8b763 100644
--- a/auto-generated/gnu-api-tests/vluxseg3ei8.c
+++ b/auto-generated/gnu-api-tests/vluxseg3ei8.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf4x3(base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf4x3(rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf2x3(base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf2x3(rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m1x3(base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m1x3(rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m2x3(base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m2x3(rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32mf2x3(base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32mf2x3(rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m1x3(base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m1x3(rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m2x3(base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m2x3(rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m1x3(base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m1x3(rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m2x3(base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m2x3(rs1, rs2, vl);
 }

-vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf8x3(base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf8x3(rs1, rs2, vl);
 }

-vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf4x3(base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf4x3(rs1, rs2, vl);
 }

-vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf2x3(base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf2x3(rs1, rs2, vl);
 }

-vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m1x3(base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m1x3(rs1, rs2, vl);
 }

-vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m2x3(base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m2x3(rs1, rs2, vl);
 }

-vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16mf4x3(base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16mf4x3(rs1, rs2, vl);
 }

-vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl)
{ - return __riscv_vluxseg3ei8_v_i16mf2x3(base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16mf2x3(rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16m1x3(base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16m1x3(rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16m2x3(base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16m2x3(rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32mf2x3(base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32mf2x3(rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32m1x3(base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32m1x3(rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32m2x3(base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32m2x3(rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i64m1x3(base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i64m1x3(rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i64m2x3(base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i64m2x3(rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8mf8x3(base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8mf8x3(rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8mf4x3(base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8mf4x3(rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8mf2x3(base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8mf2x3(rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8m1x3(base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8m1x3(rs1, rs2, vl); } -vuint8m2x3_t 
test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8m2x3(base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8m2x3(rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16mf4x3(base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16mf4x3(rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16mf2x3(base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16mf2x3(rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16m1x3(base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16m1x3(rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16m2x3(base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16m2x3(rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u32mf2x3(base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u32mf2x3(rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u32m1x3(base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u32m1x3(rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u32m2x3(base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u32m2x3(rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u64m1x3(base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u64m1x3(rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u64m2x3(base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u64m2x3(rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f16mf4x3_m(mask, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16mf4x3_m(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei8_v_f16mf2x3_m(mask, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16mf2x3_m(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f16m1x3_m(mask, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16m1x3_m(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f16m2x3_m(mask, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16m2x3_m(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f32mf2x3_m(mask, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f32mf2x3_m(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f32m1x3_m(mask, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f32m1x3_m(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f32m2x3_m(mask, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f32m2x3_m(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f64m1x3_m(mask, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f64m1x3_m(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f64m2x3_m(mask, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f64m2x3_m(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8mf8x3_m(mask, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8mf8x3_m(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8mf4x3_m(mask, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8mf4x3_m(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei8_v_i8mf2x3_m(mask, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8mf2x3_m(vm, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8m1x3_m(mask, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8m1x3_m(vm, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8m2x3_m(mask, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8m2x3_m(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16mf4x3_m(mask, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16mf4x3_m(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16mf2x3_m(mask, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16mf2x3_m(vm, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16m1x3_m(mask, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16m1x3_m(vm, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16m2x3_m(mask, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16m2x3_m(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32mf2x3_m(mask, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32mf2x3_m(vm, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32m1x3_m(mask, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32m1x3_m(vm, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32m2x3_m(mask, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32m2x3_m(vm, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i64m1x3_m(mask, base, bindex, vl); +vint64m1x3_t 
test_vluxseg3ei8_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i64m1x3_m(vm, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i64m2x3_m(mask, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i64m2x3_m(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8mf8x3_m(mask, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8mf8x3_m(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8mf4x3_m(mask, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8mf4x3_m(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8mf2x3_m(mask, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8mf2x3_m(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8m1x3_m(mask, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8m1x3_m(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u8m2x3_m(mask, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u8m2x3_m(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16mf4x3_m(mask, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16mf4x3_m(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16mf2x3_m(mask, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16mf2x3_m(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16m1x3_m(mask, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16m1x3_m(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u16m2x3_m(mask, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t vm, const uint16_t 
*rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u16m2x3_m(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u32mf2x3_m(mask, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u32mf2x3_m(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u32m1x3_m(mask, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u32m1x3_m(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u32m2x3_m(mask, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u32m2x3_m(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u64m1x3_m(mask, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u64m1x3_m(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_u64m2x3_m(mask, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_u64m2x3_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg4ei16.c b/auto-generated/gnu-api-tests/vluxseg4ei16.c index 4bc0efaea..81a076177 100644 --- a/auto-generated/gnu-api-tests/vluxseg4ei16.c +++ b/auto-generated/gnu-api-tests/vluxseg4ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf4x4(base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf4x4(rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf2x4(base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf2x4(rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m1x4(base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16m1x4(rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m2x4(base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const float16_t *rs1, vuint16m2_t rs2, 
size_t vl) { + return __riscv_vluxseg4ei16_v_f16m2x4(rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32mf2x4(base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32mf2x4(rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m1x4(base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m1x4(rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m2x4(base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m2x4(rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m1x4(base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m1x4(rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m2x4(base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m2x4(rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf8x4(base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf8x4(rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf4x4(base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf4x4(rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf2x4(base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf2x4(rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m1x4(base, bindex, vl); +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m1x4(rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m2x4(base, bindex, vl); +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m2x4(rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf4x4(base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf4x4(rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) 
{ - return __riscv_vluxseg4ei16_v_i16mf2x4(base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf2x4(rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m1x4(base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m1x4(rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m2x4(base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m2x4(rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32mf2x4(base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32mf2x4(rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m1x4(base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m1x4(rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m2x4(base, bindex, vl); +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m2x4(rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m1x4(base, bindex, vl); +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m1x4(rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m2x4(base, bindex, vl); +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m2x4(rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf8x4(base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf8x4(rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf4x4(base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf4x4(rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf2x4(base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf2x4(rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m1x4(base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei16_v_u8m1x4(rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m2x4(base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m2x4(rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf4x4(base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf4x4(rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf2x4(base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf2x4(rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m1x4(base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m1x4(rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m2x4(base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m2x4(rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32mf2x4(base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32mf2x4(rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m1x4(base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m1x4(rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m2x4(base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m2x4(rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m1x4(base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m1x4(rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m2x4(base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m2x4(rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf4x4_m(mask, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf4x4_m(vm, rs1, rs2, vl); } -vfloat16mf2x4_t 
test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf2x4_m(mask, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf2x4_m(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m1x4_m(mask, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16m1x4_m(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m2x4_m(mask, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16m2x4_m(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32mf2x4_m(mask, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32mf2x4_m(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m1x4_m(mask, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m1x4_m(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m2x4_m(mask, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m2x4_m(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m1x4_m(mask, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m1x4_m(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m2x4_m(mask, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m2x4_m(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf8x4_m(mask, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf8x4_m(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf4x4_m(mask, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei16_v_i8mf4x4_m(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf2x4_m(mask, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf2x4_m(vm, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m1x4_m(mask, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m1x4_m(vm, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m2x4_m(mask, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m2x4_m(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf4x4_m(mask, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf4x4_m(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf2x4_m(mask, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf2x4_m(vm, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m1x4_m(mask, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m1x4_m(vm, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m2x4_m(mask, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m2x4_m(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32mf2x4_m(mask, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32mf2x4_m(vm, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m1x4_m(mask, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m1x4_m(vm, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m2x4_m(mask, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei16_v_i32m2x4_m(vm, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m1x4_m(mask, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m1x4_m(vm, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m2x4_m(mask, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m2x4_m(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf8x4_m(mask, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf8x4_m(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf4x4_m(mask, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf4x4_m(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf2x4_m(mask, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf2x4_m(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m1x4_m(mask, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m1x4_m(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m2x4_m(mask, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m2x4_m(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf4x4_m(mask, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf4x4_m(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf2x4_m(mask, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf2x4_m(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m1x4_m(mask, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei16_v_u16m1x4_m(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m2x4_m(mask, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m2x4_m(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32mf2x4_m(mask, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32mf2x4_m(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m1x4_m(mask, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m1x4_m(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m2x4_m(mask, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m2x4_m(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m1x4_m(mask, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m1x4_m(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m2x4_m(mask, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m2x4_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg4ei32.c b/auto-generated/gnu-api-tests/vluxseg4ei32.c index c4403ef0e..bd5798c99 100644 --- a/auto-generated/gnu-api-tests/vluxseg4ei32.c +++ b/auto-generated/gnu-api-tests/vluxseg4ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16mf4x4(base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16mf4x4(rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16mf2x4(base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16mf2x4(rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16m1x4(base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const float16_t 
*rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16m1x4(rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16m2x4(base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16m2x4(rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f32mf2x4(base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f32mf2x4(rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f32m1x4(base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f32m1x4(rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f32m2x4(base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f32m2x4(rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f64m1x4(base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f64m1x4(rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f64m2x4(base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f64m2x4(rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_i8mf8x4(base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_i8mf8x4(rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_i8mf4x4(base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_i8mf4x4(rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_i8mf2x4(base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_i8mf2x4(rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_i8m1x4(base, bindex, vl); +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_i8m1x4(rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_i8m2x4(base, bindex, vl); +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_i8m2x4(rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t 
bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf4x4(base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf4x4(rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf2x4(base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf2x4(rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m1x4(base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m1x4(rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m2x4(base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m2x4(rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32mf2x4(base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32mf2x4(rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m1x4(base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m1x4(rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m2x4(base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m2x4(rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m1x4(base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m1x4(rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m2x4(base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m2x4(rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf8x4(base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf8x4(rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf4x4(base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf4x4(rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf2x4(base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf2x4(rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m1x4(base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m1x4(rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m2x4(base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m2x4(rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf4x4(base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf4x4(rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf2x4(base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf2x4(rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m1x4(base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m1x4(rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m2x4(base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m2x4(rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32mf2x4(base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32mf2x4(rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m1x4(base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m1x4(rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m2x4(base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m2x4(rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m1x4(base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m1x4(rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m2x4(base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m2x4(rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf4x4_m(mask, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf4x4_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf2x4_m(mask, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m1x4_m(mask, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m2x4_m(mask, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m2x4_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32mf2x4_m(mask, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m1x4_m(mask, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m2x4_m(mask, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m2x4_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m1x4_m(mask, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m2x4_m(mask, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m2x4_m(vm, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf8x4_m(mask, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf8x4_m(vm, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf4x4_m(mask, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf4x4_m(vm, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf2x4_m(mask, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf2x4_m(vm, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m1x4_m(mask, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m1x4_m(vm, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m2x4_m(mask, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m2x4_m(vm, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf4x4_m(mask, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf4x4_m(vm, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf2x4_m(mask, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf2x4_m(vm, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m1x4_m(mask, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m1x4_m(vm, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m2x4_m(mask, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m2x4_m(vm, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32mf2x4_m(mask, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32mf2x4_m(vm, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m1x4_m(mask, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m1x4_m(vm, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m2x4_m(mask, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m2x4_m(vm, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m1x4_m(mask, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m1x4_m(vm, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m2x4_m(mask, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m2x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf8x4_m(mask, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf8x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf4x4_m(mask, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf4x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf2x4_m(mask, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m1x4_m(mask, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m1x4_m(vm, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m2x4_m(mask, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m2x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf4x4_m(mask, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf4x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf2x4_m(mask, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m1x4_m(mask, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m1x4_m(vm, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m2x4_m(mask, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m2x4_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32mf2x4_m(mask, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m1x4_m(mask, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m1x4_m(vm, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m2x4_m(mask, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m2x4_m(vm, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m1x4_m(mask, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m1x4_m(vm, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m2x4_m(mask, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m2x4_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei32\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg4ei64.c b/auto-generated/gnu-api-tests/vluxseg4ei64.c
index 8c2a7e9e1..bb098d683 100644
--- a/auto-generated/gnu-api-tests/vluxseg4ei64.c
+++ b/auto-generated/gnu-api-tests/vluxseg4ei64.c
@@ -6,284 +6,284 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16mf4x4(base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16mf4x4(rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16mf2x4(base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16mf2x4(rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16m1x4(base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16m1x4(rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16m2x4(base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16m2x4(rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32mf2x4(base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32mf2x4(rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32m1x4(base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32m1x4(rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32m2x4(base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32m2x4(rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f64m1x4(base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f64m1x4(rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f64m2x4(base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f64m2x4(rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf8x4(base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf8x4(rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf4x4(base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf4x4(rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf2x4(base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf2x4(rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8m1x4(base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8m1x4(rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16mf4x4(base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16mf4x4(rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16mf2x4(base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16mf2x4(rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16m1x4(base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16m1x4(rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16m2x4(base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16m2x4(rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32mf2x4(base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32mf2x4(rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32m1x4(base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32m1x4(rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32m2x4(base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32m2x4(rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i64m1x4(base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i64m1x4(rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i64m2x4(base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i64m2x4(rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8mf8x4(base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8mf8x4(rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8mf4x4(base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8mf4x4(rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8mf2x4(base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8mf2x4(rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8m1x4(base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8m1x4(rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16mf4x4(base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16mf4x4(rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16mf2x4(base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16mf2x4(rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16m1x4(base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16m1x4(rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16m2x4(base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16m2x4(rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u32mf2x4(base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u32mf2x4(rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u32m1x4(base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u32m1x4(rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u32m2x4(base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u32m2x4(rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u64m1x4(base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u64m1x4(rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u64m2x4(base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u64m2x4(rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16mf4x4_m(mask, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16mf4x4_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16mf2x4_m(mask, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16m1x4_m(mask, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16m2x4_m(mask, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16m2x4_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32mf2x4_m(mask, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32m1x4_m(mask, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32m2x4_m(mask, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32m2x4_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f64m1x4_m(mask, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f64m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f64m2x4_m(mask, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f64m2x4_m(vm, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf8x4_m(mask, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf8x4_m(vm, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf4x4_m(mask, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf4x4_m(vm, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf2x4_m(mask, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf2x4_m(vm, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8m1x4_m(mask, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8m1x4_m(vm, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16mf4x4_m(mask, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16mf4x4_m(vm, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16mf2x4_m(mask, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16mf2x4_m(vm, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16m1x4_m(mask, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16m1x4_m(vm, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16m2x4_m(mask, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16m2x4_m(vm, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32mf2x4_m(mask, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32mf2x4_m(vm, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32m1x4_m(mask, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32m1x4_m(vm, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32m2x4_m(mask, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32m2x4_m(vm, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i64m1x4_m(mask, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i64m1x4_m(vm, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i64m2x4_m(mask, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i64m2x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8mf8x4_m(mask, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8mf8x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8mf4x4_m(mask, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8mf4x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8mf2x4_m(mask, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u8m1x4_m(mask, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u8m1x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16mf4x4_m(mask, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16mf4x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16mf2x4_m(mask, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16m1x4_m(mask, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16m1x4_m(vm, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u16m2x4_m(mask, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u16m2x4_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u32mf2x4_m(mask, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u32mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u32m1x4_m(mask, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u32m1x4_m(vm, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u32m2x4_m(mask, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u32m2x4_m(vm, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u64m1x4_m(mask, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u64m1x4_m(vm, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_u64m2x4_m(mask, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_u64m2x4_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei64\.[ivxfswum.]+\s+} 70 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg4ei8.c b/auto-generated/gnu-api-tests/vluxseg4ei8.c
index 718422034..9c06c4bc4 100644
--- a/auto-generated/gnu-api-tests/vluxseg4ei8.c
+++ b/auto-generated/gnu-api-tests/vluxseg4ei8.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16mf4x4(base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16mf4x4(rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16mf2x4(base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16mf2x4(rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16m1x4(base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16m1x4(rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16m2x4(base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16m2x4(rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32mf2x4(base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32mf2x4(rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32m1x4(base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32m1x4(rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32m2x4(base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32m2x4(rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m1x4(base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m1x4(rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m2x4(base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m2x4(rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf8x4(base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf8x4(rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf4x4(base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf4x4(rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf2x4(base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf2x4(rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m1x4(base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m1x4(rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m2x4(base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m2x4(rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf4x4(base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf4x4(rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf2x4(base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf2x4(rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m1x4(base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m1x4(rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m2x4(base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m2x4(rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32mf2x4(base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32mf2x4(rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m1x4(base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m1x4(rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m2x4(base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m2x4(rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m1x4(base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m1x4(rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m2x4(base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m2x4(rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf8x4(base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf8x4(rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf4x4(base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf4x4(rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf2x4(base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf2x4(rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m1x4(base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m1x4(rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m2x4(base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m2x4(rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf4x4(base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf4x4(rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf2x4(base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf2x4(rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m1x4(base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m1x4(rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m2x4(base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m2x4(rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32mf2x4(base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32mf2x4(rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m1x4(base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m1x4(rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m2x4(base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m2x4(rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u64m1x4(base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u64m1x4(rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u64m2x4(base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u64m2x4(rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16mf4x4_m(mask, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16mf4x4_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16mf2x4_m(mask, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16m1x4_m(mask, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16m2x4_m(mask, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16m2x4_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32mf2x4_m(mask, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32mf2x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32m1x4_m(mask, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32m2x4_m(mask, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32m2x4_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m1x4_m(mask, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m1x4_m(vm, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m2x4_m(mask, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m2x4_m(vm, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf8x4_m(mask, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf8x4_m(vm, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf4x4_m(mask, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf4x4_m(vm, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf2x4_m(mask, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf2x4_m(vm, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m1x4_m(mask, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m1x4_m(vm, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m2x4_m(mask, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m2x4_m(vm, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf4x4_m(mask, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf4x4_m(vm, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf2x4_m(mask, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf2x4_m(vm, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m1x4_m(mask, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m1x4_m(vm, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m2x4_m(mask, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m2x4_m(vm, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32mf2x4_m(mask, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32mf2x4_m(vm, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m1x4_m(mask, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m1x4_m(vm, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m2x4_m(mask, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m2x4_m(vm, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m1x4_m(mask, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m1x4_m(vm, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m2x4_m(mask, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m2x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf8x4_m(mask, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf8x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf4x4_m(mask, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf4x4_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf2x4_m(mask, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m1x4_m(mask, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m1x4_m(vm, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m2x4_m(mask, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m2x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf4x4_m(mask, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf4x4_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf2x4_m(mask, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m1x4_m(mask, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m1x4_m(vm, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m2x4_m(mask, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m2x4_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32mf2x4_m(mask, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32mf2x4_m(vm, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m1x4_m(mask, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m1x4_m(vm, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m2x4_m(mask, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m2x4_m(vm, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u64m1x4_m(mask, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u64m1x4_m(vm, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u64m2x4_m(mask, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u64m2x4_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei8\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg5ei16.c b/auto-generated/gnu-api-tests/vluxseg5ei16.c
index 25b483048..bc210a028 100644
--- a/auto-generated/gnu-api-tests/vluxseg5ei16.c
+++ b/auto-generated/gnu-api-tests/vluxseg5ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf4x5(base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf4x5(rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf2x5(base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf2x5(rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16m1x5(base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16m1x5(rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32mf2x5(base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32mf2x5(rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32m1x5(base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32m1x5(rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f64m1x5(base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f64m1x5(rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf8x5(base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf8x5(rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf4x5(base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf4x5(rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf2x5(base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf2x5(rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8m1x5(base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8m1x5(rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf4x5(base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf4x5(rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf2x5(base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf2x5(rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16m1x5(base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16m1x5(rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32mf2x5(base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32mf2x5(rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32m1x5(base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32m1x5(rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i64m1x5(base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i64m1x5(rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf8x5(base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return
__riscv_vluxseg5ei16_v_u8mf8x5(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u8mf4x5(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u8mf4x5(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u8mf2x5(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u8mf2x5(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u8m1x5(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u8m1x5(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u16mf4x5(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u16mf4x5(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u16mf2x5(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u16mf2x5(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u16m1x5(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u16m1x5(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u32mf2x5(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u32mf2x5(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u32m1x5(base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u32m1x5(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u64m1x5(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u64m1x5(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f16mf4x5_m(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f16mf4x5_m(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f16mf2x5_m(mask, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei16_v_f16mf2x5_m(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f16m1x5_m(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f16m1x5_m(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f32mf2x5_m(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f32mf2x5_m(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f32m1x5_m(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f32m1x5_m(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f64m1x5_m(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f64m1x5_m(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8mf8x5_m(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8mf8x5_m(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8mf4x5_m(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8mf4x5_m(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8mf2x5_m(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8mf2x5_m(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8m1x5_m(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8m1x5_m(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i16mf4x5_m(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i16mf4x5_m(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i16mf2x5_m(mask, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t 
vl) { + return __riscv_vluxseg5ei16_v_i16mf2x5_m(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i16m1x5_m(mask, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i16m1x5_m(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i32mf2x5_m(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i32mf2x5_m(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i32m1x5_m(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i32m1x5_m(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i64m1x5_m(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i64m1x5_m(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u8mf8x5_m(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u8mf8x5_m(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u8mf4x5_m(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u8mf4x5_m(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u8mf2x5_m(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u8mf2x5_m(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u8m1x5_m(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u8m1x5_m(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u16mf4x5_m(mask, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u16mf4x5_m(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u16mf2x5_m(mask, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, 
size_t vl) { + return __riscv_vluxseg5ei16_v_u16mf2x5_m(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u16m1x5_m(mask, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u16m1x5_m(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u32mf2x5_m(mask, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u32mf2x5_m(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u32m1x5_m(mask, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u32m1x5_m(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_u64m1x5_m(mask, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_u64m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg5ei32.c b/auto-generated/gnu-api-tests/vluxseg5ei32.c index 191c9e25b..5f92e7cb1 100644 --- a/auto-generated/gnu-api-tests/vluxseg5ei32.c +++ b/auto-generated/gnu-api-tests/vluxseg5ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf4x5(base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf4x5(rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf2x5(base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf2x5(rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16m1x5(base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16m1x5(rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32mf2x5(base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f32mf2x5(rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32m1x5(base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei32_v_f32m1x5(rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f64m1x5(base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f64m1x5(rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf8x5(base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf8x5(rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf4x5(base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf4x5(rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf2x5(base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf2x5(rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8m1x5(base, bindex, vl); +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8m1x5(rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf4x5(base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf4x5(rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf2x5(base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf2x5(rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16m1x5(base, bindex, vl); +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16m1x5(rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32mf2x5(base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32mf2x5(rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32m1x5(base, bindex, vl); +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32m1x5(rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i64m1x5(base, bindex, vl); +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i64m1x5(rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg5ei32_v_u8mf8x5(base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf8x5(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf4x5(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf4x5(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf2x5(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf2x5(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8m1x5(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8m1x5(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf4x5(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf4x5(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf2x5(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf2x5(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16m1x5(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16m1x5(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u32mf2x5(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u32mf2x5(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u32m1x5(base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u32m1x5(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u64m1x5(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u64m1x5(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf4x5_m(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf4x5_m(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf2x5_m(mask, base, bindex, vl); 
+vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf2x5_m(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16m1x5_m(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16m1x5_m(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32mf2x5_m(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f32mf2x5_m(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32m1x5_m(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f32m1x5_m(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f64m1x5_m(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f64m1x5_m(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf8x5_m(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf8x5_m(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf4x5_m(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf4x5_m(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf2x5_m(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf2x5_m(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8m1x5_m(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8m1x5_m(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf4x5_m(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf4x5_m(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf2x5_m(mask, base, 
bindex, vl); +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf2x5_m(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16m1x5_m(mask, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16m1x5_m(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32mf2x5_m(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32mf2x5_m(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32m1x5_m(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32m1x5_m(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i64m1x5_m(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i64m1x5_m(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf8x5_m(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf8x5_m(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf4x5_m(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf4x5_m(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf2x5_m(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf2x5_m(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8m1x5_m(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8m1x5_m(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf4x5_m(mask, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf4x5_m(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf2x5_m(mask, base, 
bindex, vl); +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf2x5_m(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16m1x5_m(mask, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16m1x5_m(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u32mf2x5_m(mask, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u32mf2x5_m(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u32m1x5_m(mask, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u32m1x5_m(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u64m1x5_m(mask, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u64m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg5ei64.c b/auto-generated/gnu-api-tests/vluxseg5ei64.c index d1c36a709..c39d6fcad 100644 --- a/auto-generated/gnu-api-tests/vluxseg5ei64.c +++ b/auto-generated/gnu-api-tests/vluxseg5ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf4x5(base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf4x5(rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf2x5(base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf2x5(rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16m1x5(base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16m1x5(rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32mf2x5(base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32mf2x5(rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32m1x5(base, bindex, vl); 
+vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32m1x5(rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f64m1x5(base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f64m1x5(rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf8x5(base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf8x5(rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf4x5(base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf4x5(rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf2x5(base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf2x5(rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8m1x5(base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8m1x5(rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf4x5(base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf4x5(rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf2x5(base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf2x5(rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16m1x5(base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16m1x5(rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32mf2x5(base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32mf2x5(rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32m1x5(base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32m1x5(rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i64m1x5(base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i64m1x5(rs1, rs2, vl); } -vuint8mf8x5_t 
test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf8x5(base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf8x5(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf4x5(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf4x5(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf2x5(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf2x5(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8m1x5(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8m1x5(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf4x5(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf4x5(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf2x5(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf2x5(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16m1x5(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16m1x5(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32mf2x5(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32mf2x5(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32m1x5(base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32m1x5(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u64m1x5(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u64m1x5(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf4x5_m(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf4x5_m(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { 
- return __riscv_vluxseg5ei64_v_f16mf2x5_m(mask, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf2x5_m(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16m1x5_m(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16m1x5_m(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32mf2x5_m(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32mf2x5_m(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32m1x5_m(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32m1x5_m(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f64m1x5_m(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f64m1x5_m(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf8x5_m(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf8x5_m(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf4x5_m(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf4x5_m(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf2x5_m(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf2x5_m(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8m1x5_m(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8m1x5_m(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf4x5_m(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf4x5_m(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t 
vl) { - return __riscv_vluxseg5ei64_v_i16mf2x5_m(mask, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf2x5_m(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16m1x5_m(mask, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16m1x5_m(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32mf2x5_m(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32mf2x5_m(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32m1x5_m(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32m1x5_m(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i64m1x5_m(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i64m1x5_m(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf8x5_m(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf8x5_m(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf4x5_m(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf4x5_m(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf2x5_m(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf2x5_m(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8m1x5_m(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8m1x5_m(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf4x5_m(mask, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf4x5_m(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - 
return __riscv_vluxseg5ei64_v_u16mf2x5_m(mask, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf2x5_m(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16m1x5_m(mask, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16m1x5_m(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32mf2x5_m(mask, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32mf2x5_m(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32m1x5_m(mask, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32m1x5_m(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u64m1x5_m(mask, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u64m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg5ei8.c b/auto-generated/gnu-api-tests/vluxseg5ei8.c index d81a7ea01..6e2f02d7a 100644 --- a/auto-generated/gnu-api-tests/vluxseg5ei8.c +++ b/auto-generated/gnu-api-tests/vluxseg5ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf4x5(base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf4x5(rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf2x5(base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf2x5(rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16m1x5(base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16m1x5(rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32mf2x5(base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32mf2x5(rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg5ei8_v_f32m1x5(base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32m1x5(rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f64m1x5(base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f64m1x5(rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf8x5(base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf8x5(rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf4x5(base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf4x5(rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf2x5(base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf2x5(rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8m1x5(base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8m1x5(rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf4x5(base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf4x5(rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf2x5(base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf2x5(rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16m1x5(base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16m1x5(rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32mf2x5(base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32mf2x5(rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32m1x5(base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32m1x5(rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i64m1x5(base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i64m1x5(rs1, rs2, vl); } -vuint8mf8x5_t 
test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf8x5(base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf8x5(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf4x5(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf4x5(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf2x5(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf2x5(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8m1x5(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8m1x5(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf4x5(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf4x5(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf2x5(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf2x5(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16m1x5(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16m1x5(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32mf2x5(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32mf2x5(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32m1x5(base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32m1x5(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u64m1x5(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u64m1x5(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf4x5_m(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf4x5_m(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg5ei8_v_f16mf2x5_m(mask, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf2x5_m(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16m1x5_m(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16m1x5_m(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32mf2x5_m(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32mf2x5_m(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32m1x5_m(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32m1x5_m(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f64m1x5_m(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f64m1x5_m(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf8x5_m(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf8x5_m(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf4x5_m(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf4x5_m(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf2x5_m(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf2x5_m(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8m1x5_m(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8m1x5_m(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf4x5_m(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf4x5_m(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf2x5_m(mask, 
base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf2x5_m(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16m1x5_m(mask, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16m1x5_m(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32mf2x5_m(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32mf2x5_m(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32m1x5_m(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32m1x5_m(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i64m1x5_m(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i64m1x5_m(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf8x5_m(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf8x5_m(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf4x5_m(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf4x5_m(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf2x5_m(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf2x5_m(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8m1x5_m(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8m1x5_m(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf4x5_m(mask, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf4x5_m(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf2x5_m(mask, base, bindex, vl); +vuint16mf2x5_t 
test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf2x5_m(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16m1x5_m(mask, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16m1x5_m(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32mf2x5_m(mask, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32mf2x5_m(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32m1x5_m(mask, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32m1x5_m(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u64m1x5_m(mask, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u64m1x5_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg6ei16.c b/auto-generated/gnu-api-tests/vluxseg6ei16.c index d95fc0f41..a8a2fb9bd 100644 --- a/auto-generated/gnu-api-tests/vluxseg6ei16.c +++ b/auto-generated/gnu-api-tests/vluxseg6ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16mf4x6(base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16mf4x6(rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16mf2x6(base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16mf2x6(rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16m1x6(base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16m1x6(rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f32mf2x6(base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f32mf2x6(rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f32m1x6(base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const 
float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f32m1x6(rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f64m1x6(base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f64m1x6(rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8mf8x6(base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8mf8x6(rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8mf4x6(base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8mf4x6(rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8mf2x6(base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8mf2x6(rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8m1x6(base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8m1x6(rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i16mf4x6(base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i16mf4x6(rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i16mf2x6(base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i16mf2x6(rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i16m1x6(base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i16m1x6(rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i32mf2x6(base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i32mf2x6(rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i32m1x6(base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i32m1x6(rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i64m1x6(base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i64m1x6(rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t 
bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8mf8x6(base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8mf8x6(rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8mf4x6(base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8mf4x6(rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8mf2x6(base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8m1x6(base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u16mf4x6(base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u16mf2x6(base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u16m1x6(base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u16m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u32mf2x6(base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u32m1x6(base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u32m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u64m1x6(base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u64m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16mf4x6_m(mask, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei16_v_f16mf2x6_m(mask, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16m1x6_m(mask, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16m1x6_m(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f32mf2x6_m(mask, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f32mf2x6_m(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f32m1x6_m(mask, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f32m1x6_m(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f64m1x6_m(mask, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f64m1x6_m(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8mf8x6_m(mask, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8mf8x6_m(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8mf4x6_m(mask, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8mf4x6_m(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8mf2x6_m(mask, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8mf2x6_m(vm, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i8m1x6_m(mask, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i8m1x6_m(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i16mf4x6_m(mask, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i16mf4x6_m(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, 
size_t vl) { - return __riscv_vluxseg6ei16_v_i16mf2x6_m(mask, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i16mf2x6_m(vm, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i16m1x6_m(mask, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i16m1x6_m(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i32mf2x6_m(mask, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i32mf2x6_m(vm, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i32m1x6_m(mask, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i32m1x6_m(vm, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_i64m1x6_m(mask, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_i64m1x6_m(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8mf8x6_m(mask, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8mf8x6_m(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8mf4x6_m(mask, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8mf4x6_m(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8mf2x6_m(mask, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8mf2x6_m(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u8m1x6_m(mask, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u8m1x6_m(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u16mf4x6_m(mask, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u16mf4x6_m(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t 
bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u16mf2x6_m(mask, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u16mf2x6_m(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u16m1x6_m(mask, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u16m1x6_m(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u32mf2x6_m(mask, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u32mf2x6_m(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u32m1x6_m(mask, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u32m1x6_m(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_u64m1x6_m(mask, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_u64m1x6_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg6ei32.c b/auto-generated/gnu-api-tests/vluxseg6ei32.c index 9b9e21986..85174574a 100644 --- a/auto-generated/gnu-api-tests/vluxseg6ei32.c +++ b/auto-generated/gnu-api-tests/vluxseg6ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16mf4x6(base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16mf4x6(rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16mf2x6(base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16mf2x6(rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16m1x6(base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16m1x6(rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f32mf2x6(base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f32mf2x6(rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float32_t *base, vuint32m1_t bindex, 
size_t vl) { - return __riscv_vluxseg6ei32_v_f32m1x6(base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f32m1x6(rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f64m1x6(base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f64m1x6(rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf8x6(base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf8x6(rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf4x6(base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf4x6(rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf2x6(base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf2x6(rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8m1x6(base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8m1x6(rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i16mf4x6(base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i16mf4x6(rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i16mf2x6(base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i16mf2x6(rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i16m1x6(base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i16m1x6(rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i32mf2x6(base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i32mf2x6(rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i32m1x6(base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i32m1x6(rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i64m1x6(base, bindex, vl); +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei32_v_i64m1x6(rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8mf8x6(base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8mf8x6(rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8mf4x6(base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8mf4x6(rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8mf2x6(base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8m1x6(base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u16mf4x6(base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u16mf2x6(base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u16m1x6(base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u16m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u32mf2x6(base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u32m1x6(base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u32m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u64m1x6(base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u64m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16mf4x6_m(mask, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t 
test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16mf2x6_m(mask, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16m1x6_m(mask, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16m1x6_m(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f32mf2x6_m(mask, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f32mf2x6_m(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f32m1x6_m(mask, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f32m1x6_m(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f64m1x6_m(mask, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f64m1x6_m(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf8x6_m(mask, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf8x6_m(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf4x6_m(mask, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf4x6_m(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf2x6_m(mask, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf2x6_m(vm, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8m1x6_m(mask, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8m1x6_m(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i16mf4x6_m(mask, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i16mf4x6_m(vm, rs1, rs2, vl); } 
-vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16mf2x6_m(mask, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16mf2x6_m(vm, rs1, rs2, vl);
 }

-vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16m1x6_m(mask, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16m1x6_m(vm, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32mf2x6_m(mask, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32mf2x6_m(vm, rs1, rs2, vl);
 }

-vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32m1x6_m(mask, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32m1x6_m(vm, rs1, rs2, vl);
 }

-vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i64m1x6_m(mask, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i64m1x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf8x6_m(mask, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf8x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf4x6_m(mask, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf4x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf2x6_m(mask, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8m1x6_m(mask, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8m1x6_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf4x6_m(mask, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16mf4x6_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf2x6_m(mask, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16m1x6_m(mask, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16m1x6_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u32mf2x6_m(mask, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u32mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u32m1x6_m(mask, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u32m1x6_m(vm, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u64m1x6_m(mask, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u64m1x6_m(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vluxseg6ei64.c b/auto-generated/gnu-api-tests/vluxseg6ei64.c
index b6aa71af9..4fe053336 100644
--- a/auto-generated/gnu-api-tests/vluxseg6ei64.c
+++ b/auto-generated/gnu-api-tests/vluxseg6ei64.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f16mf4x6(base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f16mf4x6(rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f16mf2x6(base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f16mf2x6(rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f16m1x6(base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f16m1x6(rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f32mf2x6(base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f32mf2x6(rs1, rs2, vl);
} -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32m1x6(base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32m1x6(rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f64m1x6(base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f64m1x6(rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf8x6(base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf8x6(rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf4x6(base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf4x6(rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf2x6(base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf2x6(rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8m1x6(base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8m1x6(rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf4x6(base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf4x6(rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf2x6(base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf2x6(rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16m1x6(base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16m1x6(rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32mf2x6(base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32mf2x6(rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32m1x6(base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32m1x6(rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i64m1x6(base, bindex, vl); +vint64m1x6_t 
test_vluxseg6ei64_v_i64m1x6(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i64m1x6(rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf8x6(base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf8x6(rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf4x6(base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf4x6(rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf2x6(base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf2x6(rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8m1x6(base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8m1x6(rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf4x6(base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf4x6(rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf2x6(base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf2x6(rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16m1x6(base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16m1x6(rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32mf2x6(base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32mf2x6(rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32m1x6(base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32m1x6(rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u64m1x6(base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u64m1x6(rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16mf4x6_m(mask, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei64_v_f16mf4x6_m(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16mf2x6_m(mask, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16mf2x6_m(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16m1x6_m(mask, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16m1x6_m(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32mf2x6_m(mask, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32mf2x6_m(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32m1x6_m(mask, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32m1x6_m(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f64m1x6_m(mask, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f64m1x6_m(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf8x6_m(mask, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf8x6_m(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf4x6_m(mask, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf4x6_m(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf2x6_m(mask, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf2x6_m(vm, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8m1x6_m(mask, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8m1x6_m(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf4x6_m(mask, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + 
+  return __riscv_vluxseg6ei64_v_i16mf4x6_m(vm, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i16mf2x6_m(mask, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i16mf2x6_m(vm, rs1, rs2, vl);
 }

-vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i16m1x6_m(mask, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i16m1x6_m(vm, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i32mf2x6_m(mask, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i32mf2x6_m(vm, rs1, rs2, vl);
 }

-vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i32m1x6_m(mask, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i32m1x6_m(vm, rs1, rs2, vl);
 }

-vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i64m1x6_m(mask, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i64m1x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8mf8x6_m(mask, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8mf8x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8mf4x6_m(mask, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8mf4x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8mf2x6_m(mask, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8m1x6_m(mask, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8m1x6_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u16mf4x6_m(mask, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u16mf4x6_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u16mf2x6_m(mask, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u16mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u16m1x6_m(mask, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u16m1x6_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u32mf2x6_m(mask, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u32mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u32m1x6_m(mask, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u32m1x6_m(vm, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u64m1x6_m(mask, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u64m1x6_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei64\.[ivxfswum.]+\s+} 52 } } */

diff --git a/auto-generated/gnu-api-tests/vluxseg6ei8.c b/auto-generated/gnu-api-tests/vluxseg6ei8.c
index c1173dd60..71622218c 100644
--- a/auto-generated/gnu-api-tests/vluxseg6ei8.c
+++ b/auto-generated/gnu-api-tests/vluxseg6ei8.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf4x6(base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf4x6(rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf2x6(base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf2x6(rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16m1x6(base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16m1x6(rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32mf2x6(base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32mf2x6(rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32m1x6(base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32m1x6(rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f64m1x6(base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f64m1x6(rs1, rs2, vl);
 }

-vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf8x6(base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf8x6(rs1, rs2, vl);
 }

-vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf4x6(base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf4x6(rs1, rs2, vl);
 }

-vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf2x6(base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf2x6(rs1, rs2, vl);
 }

-vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8m1x6(base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8m1x6(rs1, rs2, vl);
 }

-vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf4x6(base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf4x6(rs1, rs2, vl);
 }

-vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf2x6(base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf2x6(rs1, rs2, vl);
 }

-vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16m1x6(base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16m1x6(rs1, rs2, vl);
 }

-vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32mf2x6(base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32mf2x6(rs1, rs2, vl);
 }

-vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32m1x6(base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32m1x6(rs1, rs2, vl);
 }

-vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i64m1x6(base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i64m1x6(rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf8x6(base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf8x6(rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf4x6(base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf4x6(rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf2x6(base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf2x6(rs1, rs2, vl);
 }

-vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8m1x6(base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8m1x6(rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf4x6(base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf4x6(rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf2x6(base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf2x6(rs1, rs2, vl);
 }

-vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16m1x6(base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16m1x6(rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32mf2x6(base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32mf2x6(rs1, rs2, vl);
 }

-vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32m1x6(base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32m1x6(rs1, rs2, vl);
 }

-vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u64m1x6(base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u64m1x6(rs1, rs2, vl);
 }

-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf4x6_m(mask, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf4x6_m(vm, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf2x6_m(mask, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf2x6_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16m1x6_m(mask, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16m1x6_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32mf2x6_m(mask, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32mf2x6_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32m1x6_m(mask, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32m1x6_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f64m1x6_m(mask, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f64m1x6_m(vm, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf8x6_m(mask, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf8x6_m(vm, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf4x6_m(mask, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf4x6_m(vm, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf2x6_m(mask, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf2x6_m(vm, rs1, rs2, vl);
 }

-vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8m1x6_m(mask, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8m1x6_m(vm, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf4x6_m(mask, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf4x6_m(vm, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf2x6_m(mask, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf2x6_m(vm, rs1, rs2, vl);
 }

-vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16m1x6_m(mask, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16m1x6_m(vm, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32mf2x6_m(mask, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32mf2x6_m(vm, rs1, rs2, vl);
 }

-vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32m1x6_m(mask, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32m1x6_m(vm, rs1, rs2, vl);
 }

-vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i64m1x6_m(mask, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i64m1x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf8x6_m(mask, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf8x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf4x6_m(mask, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf4x6_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf2x6_m(mask, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8m1x6_m(mask, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8m1x6_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf4x6_m(mask, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf4x6_m(vm, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf2x6_m(mask, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16m1x6_m(mask, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16m1x6_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32mf2x6_m(mask, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32mf2x6_m(vm, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32m1x6_m(mask, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32m1x6_m(vm, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u64m1x6_m(mask, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u64m1x6_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei8\.[ivxfswum.]+\s+} 52 } } */

diff --git a/auto-generated/gnu-api-tests/vluxseg7ei16.c b/auto-generated/gnu-api-tests/vluxseg7ei16.c
index e629480a5..bb632c700 100644
--- a/auto-generated/gnu-api-tests/vluxseg7ei16.c
+++ b/auto-generated/gnu-api-tests/vluxseg7ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf4x7(base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf4x7(rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf2x7(base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf2x7(rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16m1x7(base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16m1x7(rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32mf2x7(base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32mf2x7(rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32m1x7(base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32m1x7(rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f64m1x7(base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f64m1x7(rs1, rs2, vl);
 }

-vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf8x7(base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf8x7(rs1, rs2, vl);
 }

-vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf4x7(base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf4x7(rs1, rs2, vl);
 }

-vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf2x7(base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf2x7(rs1, rs2, vl);
 }

-vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8m1x7(base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8m1x7(rs1, rs2, vl);
 }

-vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf4x7(base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf4x7(rs1, rs2, vl);
 }

-vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf2x7(base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf2x7(rs1, rs2, vl);
 }

-vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16m1x7(base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16m1x7(rs1, rs2, vl);
 }

-vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32mf2x7(base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32mf2x7(rs1, rs2, vl);
 }

-vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32m1x7(base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32m1x7(rs1, rs2, vl);
 }

-vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i64m1x7(base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i64m1x7(rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf8x7(base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf8x7(rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf4x7(base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf4x7(rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf2x7(base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf2x7(rs1, rs2, vl);
 }

-vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8m1x7(base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8m1x7(rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16mf4x7(base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16mf4x7(rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16mf2x7(base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16mf2x7(rs1, rs2, vl);
 }

-vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16m1x7(base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16m1x7(rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u32mf2x7(base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u32mf2x7(rs1, rs2, vl);
 }

-vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u32m1x7(base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u32m1x7(rs1, rs2, vl);
 }

-vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u64m1x7(base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u64m1x7(rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf4x7_m(mask, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf4x7_m(vm, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf2x7_m(mask, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf2x7_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16m1x7_m(mask, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16m1x7_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32mf2x7_m(mask, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32mf2x7_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32m1x7_m(mask, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32m1x7_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f64m1x7_m(mask, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f64m1x7_m(vm, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf8x7_m(mask, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf8x7_m(vm, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf4x7_m(mask, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf4x7_m(vm, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf2x7_m(mask, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf2x7_m(vm, rs1, rs2, vl);
 }

-vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8m1x7_m(mask, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8m1x7_m(vm, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf4x7_m(mask, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf4x7_m(vm, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf2x7_m(mask, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf2x7_m(vm, rs1, rs2, vl);
 }

-vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16m1x7_m(mask, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16m1x7_m(vm, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32mf2x7_m(mask, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32mf2x7_m(vm, rs1, rs2, vl);
 }

-vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32m1x7_m(mask, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32m1x7_m(vm, rs1, rs2, vl);
 }

-vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i64m1x7_m(mask, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i64m1x7_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf8x7_m(mask, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf8x7_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf4x7_m(mask, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf4x7_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf2x7_m(mask, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf2x7_m(vm, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8m1x7_m(mask, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8m1x7_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16mf4x7_m(mask, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16mf4x7_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16mf2x7_m(mask, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16mf2x7_m(vm, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16m1x7_m(mask, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16m1x7_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u32mf2x7_m(mask, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u32mf2x7_m(vm, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u32m1x7_m(mask, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u32m1x7_m(vm, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u64m1x7_m(mask, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u64m1x7_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei16\.[ivxfswum.]+\s+} 52 } } */

diff --git a/auto-generated/gnu-api-tests/vluxseg7ei32.c b/auto-generated/gnu-api-tests/vluxseg7ei32.c
index 4707de963..2dc3c5aad 100644
--- a/auto-generated/gnu-api-tests/vluxseg7ei32.c
+++ b/auto-generated/gnu-api-tests/vluxseg7ei32.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf4x7(base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf4x7(rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf2x7(base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf2x7(rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16m1x7(base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16m1x7(rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32mf2x7(base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32mf2x7(rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32m1x7(base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32m1x7(rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f64m1x7(base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f64m1x7(rs1, rs2, vl);
 }

-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf8x7(base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf8x7(rs1, rs2, vl);
 }

-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf4x7(base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf4x7(rs1, rs2, vl);
 }

-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf2x7(base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf2x7(rs1, rs2, vl);
 }

-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8m1x7(base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8m1x7(rs1, rs2, vl);
 }

-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf4x7(base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf4x7(rs1, rs2, vl);
 }

-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf2x7(base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf2x7(rs1, rs2, vl);
 }

-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16m1x7(base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16m1x7(rs1, rs2, vl);
 }

-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32mf2x7(base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32mf2x7(rs1, rs2, vl);
 }

-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32m1x7(base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32m1x7(rs1, rs2, vl);
 }

-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i64m1x7(base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i64m1x7(rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf8x7(base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf8x7(rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf4x7(base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf4x7(rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf2x7(base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf2x7(rs1, rs2, vl);
 }

-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8m1x7(base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8m1x7(rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf4x7(base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf4x7(rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf2x7(base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf2x7(rs1, rs2, vl);
 }

-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16m1x7(base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16m1x7(rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32mf2x7(base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32mf2x7(rs1, rs2, vl);
 }

-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32m1x7(base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32m1x7(rs1, rs2, vl);
 }

-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u64m1x7(base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u64m1x7(rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf4x7_m(mask, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf4x7_m(vm, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf2x7_m(mask, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf2x7_m(vm, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16m1x7_m(mask, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16m1x7_m(vm, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32mf2x7_m(mask, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32mf2x7_m(vm, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32m1x7_m(mask, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32m1x7_m(vm, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f64m1x7_m(mask, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f64m1x7_m(vm, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf8x7_m(mask, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf8x7_m(vm, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf4x7_m(mask, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf4x7_m(vm, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf2x7_m(mask, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf2x7_m(vm, rs1, rs2, vl);
 }

-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8m1x7_m(mask, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8m1x7_m(vm, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf4x7_m(mask, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf4x7_m(vm, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf2x7_m(mask, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf2x7_m(vm, rs1, rs2, vl);
 }

-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16m1x7_m(mask, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16m1x7_m(vm, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32mf2x7_m(mask, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32mf2x7_m(vm, rs1, rs2, vl);
 }

-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32m1x7_m(mask, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32m1x7_m(vm, rs1, rs2, vl);
 }

-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i64m1x7_m(mask, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i64m1x7_m(vm, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf8x7_m(mask, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf8x7_m(vm, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf4x7_m(mask, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf4x7_m(vm, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf2x7_m(mask, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf2x7_m(vm, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8m1x7_m(mask, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8m1x7_m(vm, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf4x7_m(mask, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf4x7_m(vm, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf2x7_m(mask, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf2x7_m(vm, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16m1x7_m(mask, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16m1x7_m(vm, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32mf2x7_m(mask, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32mf2x7_m(vm, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32m1x7_m(mask, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32m1x7_m(vm, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u64m1x7_m(mask, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u64m1x7_m(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei32\.[ivxfswum.]+\s+} 52 } } */

diff --git a/auto-generated/gnu-api-tests/vluxseg7ei64.c b/auto-generated/gnu-api-tests/vluxseg7ei64.c
index 337e16344..22c6fee93 100644
--- a/auto-generated/gnu-api-tests/vluxseg7ei64.c
+++ b/auto-generated/gnu-api-tests/vluxseg7ei64.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf4x7(base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf4x7(rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf2x7(base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf2x7(rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16m1x7(base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16m1x7(rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float32_t *base, vuint64m1_t bindex, size_t vl) {
bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f32mf2x7(rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f32m1x7(base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f32m1x7(rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f64m1x7(base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f64m1x7(rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8mf8x7(base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8mf8x7(rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8mf4x7(base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8mf4x7(rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8mf2x7(base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8mf2x7(rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8m1x7(base, bindex, vl); +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8m1x7(rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i16mf4x7(base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i16mf4x7(rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i16mf2x7(base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i16mf2x7(rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i16m1x7(base, bindex, vl); +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i16m1x7(rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i32mf2x7(base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i32mf2x7(rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i32m1x7(base, bindex, vl); +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i32m1x7(rs1, rs2, vl); } -vint64m1x7_t 
test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i64m1x7(base, bindex, vl); +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i64m1x7(rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8mf8x7(base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8mf8x7(rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8mf4x7(base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8mf4x7(rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8mf2x7(base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8mf2x7(rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8m1x7(base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8m1x7(rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u16mf4x7(base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u16mf4x7(rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u16mf2x7(base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u16mf2x7(rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u16m1x7(base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u16m1x7(rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u32mf2x7(base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u32mf2x7(rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u32m1x7(base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u32m1x7(rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u64m1x7(base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u64m1x7(rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f16mf4x7_m(mask, base, 
bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f16mf4x7_m(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f16mf2x7_m(mask, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f16mf2x7_m(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f16m1x7_m(mask, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f16m1x7_m(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f32mf2x7_m(mask, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f32mf2x7_m(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f32m1x7_m(mask, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f32m1x7_m(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_f64m1x7_m(mask, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_f64m1x7_m(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8mf8x7_m(mask, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8mf8x7_m(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8mf4x7_m(mask, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8mf4x7_m(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8mf2x7_m(mask, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8mf2x7_m(vm, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i8m1x7_m(mask, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i8m1x7_m(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg7ei64_v_i16mf4x7_m(mask, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i16mf4x7_m(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i16mf2x7_m(mask, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i16mf2x7_m(vm, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i16m1x7_m(mask, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i16m1x7_m(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i32mf2x7_m(mask, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i32mf2x7_m(vm, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i32m1x7_m(mask, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i32m1x7_m(vm, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_i64m1x7_m(mask, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_i64m1x7_m(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8mf8x7_m(mask, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8mf8x7_m(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8mf4x7_m(mask, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8mf4x7_m(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8mf2x7_m(mask, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8mf2x7_m(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u8m1x7_m(mask, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u8m1x7_m(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg7ei64_v_u16mf4x7_m(mask, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u16mf4x7_m(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u16mf2x7_m(mask, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u16mf2x7_m(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u16m1x7_m(mask, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u16m1x7_m(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u32mf2x7_m(mask, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u32mf2x7_m(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u32m1x7_m(mask, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u32m1x7_m(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_v_u64m1x7_m(mask, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_v_u64m1x7_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg7ei8.c b/auto-generated/gnu-api-tests/vluxseg7ei8.c index ffda7a824..755ea6ede 100644 --- a/auto-generated/gnu-api-tests/vluxseg7ei8.c +++ b/auto-generated/gnu-api-tests/vluxseg7ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf4x7(base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf4x7(rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf2x7(base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf2x7(rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16m1x7(base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16m1x7(rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float32_t *base, vuint8mf8_t bindex, size_t vl) { 
- return __riscv_vluxseg7ei8_v_f32mf2x7(base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32mf2x7(rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32m1x7(base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32m1x7(rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f64m1x7(base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f64m1x7(rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf8x7(base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf8x7(rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf4x7(base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf4x7(rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf2x7(base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf2x7(rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8m1x7(base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8m1x7(rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf4x7(base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf4x7(rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf2x7(base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf2x7(rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16m1x7(base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16m1x7(rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32mf2x7(base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32mf2x7(rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32m1x7(base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32m1x7(rs1, rs2, vl); } -vint64m1x7_t 
test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i64m1x7(base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i64m1x7(rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf8x7(base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf8x7(rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf4x7(base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf4x7(rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf2x7(base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf2x7(rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8m1x7(base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8m1x7(rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf4x7(base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf4x7(rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf2x7(base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf2x7(rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16m1x7(base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16m1x7(rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32mf2x7(base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32mf2x7(rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32m1x7(base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32m1x7(rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u64m1x7(base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u64m1x7(rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf4x7_m(mask, base, bindex, vl); +vfloat16mf4x7_t 
test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf4x7_m(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf2x7_m(mask, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf2x7_m(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16m1x7_m(mask, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16m1x7_m(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32mf2x7_m(mask, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32mf2x7_m(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32m1x7_m(mask, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32m1x7_m(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f64m1x7_m(mask, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f64m1x7_m(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf8x7_m(mask, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf8x7_m(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf4x7_m(mask, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf4x7_m(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf2x7_m(mask, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf2x7_m(vm, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8m1x7_m(mask, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8m1x7_m(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf4x7_m(mask, base, bindex, vl); +vint16mf4x7_t 
test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf4x7_m(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf2x7_m(mask, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf2x7_m(vm, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16m1x7_m(mask, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16m1x7_m(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32mf2x7_m(mask, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32mf2x7_m(vm, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32m1x7_m(mask, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32m1x7_m(vm, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i64m1x7_m(mask, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i64m1x7_m(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf8x7_m(mask, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf8x7_m(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf4x7_m(mask, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf4x7_m(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf2x7_m(mask, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf2x7_m(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8m1x7_m(mask, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8m1x7_m(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf4x7_m(mask, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t vm, const 
uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf4x7_m(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf2x7_m(mask, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf2x7_m(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16m1x7_m(mask, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16m1x7_m(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32mf2x7_m(mask, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32mf2x7_m(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32m1x7_m(mask, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32m1x7_m(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u64m1x7_m(mask, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u64m1x7_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg8ei16.c b/auto-generated/gnu-api-tests/vluxseg8ei16.c index c98b34656..d526c76fe 100644 --- a/auto-generated/gnu-api-tests/vluxseg8ei16.c +++ b/auto-generated/gnu-api-tests/vluxseg8ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16mf4x8(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16mf2x8(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16m1x8(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16m1x8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f32mf2x8(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float32_t 
*rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f32m1x8(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f32m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f64m1x8(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f64m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf8x8(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf8x8(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf4x8(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf4x8(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf2x8(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf2x8(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8m1x8(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8m1x8(rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16mf4x8(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16mf4x8(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16mf2x8(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16mf2x8(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16m1x8(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16m1x8(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i32mf2x8(base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i32mf2x8(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i32m1x8(base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i32m1x8(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t 
bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i64m1x8(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i64m1x8(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf8x8(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf8x8(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf4x8(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf4x8(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf2x8(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf2x8(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8m1x8(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8m1x8(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16mf4x8(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16mf4x8(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16mf2x8(base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16mf2x8(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16m1x8(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16m1x8(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u32mf2x8(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u32mf2x8(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u32m1x8(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u32m1x8(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u64m1x8(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u64m1x8(rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16mf4x8_m(mask, base, bindex, vl); +vfloat16mf4x8_t 
test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16mf4x8_m(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16mf2x8_m(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16mf2x8_m(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16m1x8_m(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16m1x8_m(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f32mf2x8_m(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f32mf2x8_m(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f32m1x8_m(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f32m1x8_m(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f64m1x8_m(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f64m1x8_m(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf8x8_m(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf8x8_m(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf4x8_m(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf4x8_m(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf2x8_m(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf2x8_m(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8m1x8_m(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8m1x8_m(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16mf4x8_m(mask, base, 
bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16mf4x8_m(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16mf2x8_m(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16mf2x8_m(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16m1x8_m(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16m1x8_m(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i32mf2x8_m(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i32mf2x8_m(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i32m1x8_m(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i32m1x8_m(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i64m1x8_m(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i64m1x8_m(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf8x8_m(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf8x8_m(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf4x8_m(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf4x8_m(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf2x8_m(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf2x8_m(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8m1x8_m(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8m1x8_m(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16mf4x8_m(mask, base, 
bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16mf4x8_m(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16mf2x8_m(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16mf2x8_m(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16m1x8_m(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16m1x8_m(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u32mf2x8_m(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u32mf2x8_m(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u32m1x8_m(mask, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u32m1x8_m(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u64m1x8_m(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u64m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg8ei32.c b/auto-generated/gnu-api-tests/vluxseg8ei32.c index 62bfc39c7..8d3cfd99c 100644 --- a/auto-generated/gnu-api-tests/vluxseg8ei32.c +++ b/auto-generated/gnu-api-tests/vluxseg8ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16mf4x8(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16mf2x8(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16m1x8(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16m1x8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei32_v_f32mf2x8(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f32m1x8(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f32m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f64m1x8(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f64m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf8x8(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf8x8(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf4x8(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf4x8(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf2x8(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf2x8(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8m1x8(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8m1x8(rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16mf4x8(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16mf4x8(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16mf2x8(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16mf2x8(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16m1x8(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16m1x8(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i32mf2x8(base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i32mf2x8(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i32m1x8(base, bindex, vl); +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei32_v_i32m1x8(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i64m1x8(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i64m1x8(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf8x8(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf8x8(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf4x8(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf4x8(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf2x8(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf2x8(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8m1x8(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8m1x8(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16mf4x8(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16mf4x8(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16mf2x8(base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16mf2x8(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16m1x8(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16m1x8(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u32mf2x8(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u32mf2x8(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u32m1x8(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u32m1x8(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u64m1x8(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u64m1x8(rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, 
size_t vl) { - return __riscv_vluxseg8ei32_v_f16mf4x8_m(mask, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16mf4x8_m(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16mf2x8_m(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16mf2x8_m(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16m1x8_m(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16m1x8_m(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f32mf2x8_m(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f32mf2x8_m(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f32m1x8_m(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f32m1x8_m(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f64m1x8_m(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f64m1x8_m(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf8x8_m(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf8x8_m(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf4x8_m(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf4x8_m(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf2x8_m(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf2x8_m(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8m1x8_m(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8m1x8_m(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, 
vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16mf4x8_m(mask, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16mf4x8_m(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16mf2x8_m(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16mf2x8_m(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16m1x8_m(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16m1x8_m(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i32mf2x8_m(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i32mf2x8_m(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i32m1x8_m(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i32m1x8_m(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i64m1x8_m(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i64m1x8_m(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf8x8_m(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf8x8_m(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf4x8_m(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf4x8_m(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf2x8_m(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf2x8_m(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8m1x8_m(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8m1x8_m(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, 
vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16mf4x8_m(mask, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16mf4x8_m(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16mf2x8_m(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16mf2x8_m(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16m1x8_m(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16m1x8_m(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u32mf2x8_m(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u32mf2x8_m(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u32m1x8_m(mask, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u32m1x8_m(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u64m1x8_m(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u64m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg8ei64.c b/auto-generated/gnu-api-tests/vluxseg8ei64.c index 10b14df15..de412da8e 100644 --- a/auto-generated/gnu-api-tests/vluxseg8ei64.c +++ b/auto-generated/gnu-api-tests/vluxseg8ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf4x8(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf2x8(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16m1x8(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16m1x8(rs1, rs2, vl); } -vfloat32mf2x8_t 
test_vluxseg8ei64_v_f32mf2x8(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32mf2x8(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32m1x8(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f64m1x8(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f64m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf8x8(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf8x8(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf4x8(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf4x8(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf2x8(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf2x8(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8m1x8(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8m1x8(rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf4x8(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf4x8(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf2x8(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf2x8(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16m1x8(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16m1x8(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32mf2x8(base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32mf2x8(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32m1x8(base, bindex, vl); +vint32m1x8_t 
test_vluxseg8ei64_v_i32m1x8(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32m1x8(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i64m1x8(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i64m1x8(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf8x8(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf8x8(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf4x8(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf4x8(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf2x8(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf2x8(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8m1x8(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8m1x8(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf4x8(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf4x8(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf2x8(base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf2x8(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16m1x8(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16m1x8(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32mf2x8(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32mf2x8(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32m1x8(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32m1x8(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u64m1x8(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u64m1x8(rs1, rs2, vl); } -vfloat16mf4x8_t 
test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf4x8_m(mask, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf4x8_m(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf2x8_m(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf2x8_m(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16m1x8_m(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16m1x8_m(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32mf2x8_m(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32mf2x8_m(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32m1x8_m(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32m1x8_m(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f64m1x8_m(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f64m1x8_m(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf8x8_m(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf8x8_m(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf4x8_m(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf4x8_m(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf2x8_m(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf2x8_m(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8m1x8_m(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8m1x8_m(vm, rs1, rs2, vl); } 
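/*
 * [Editor's note] A minimal usage sketch, not part of the auto-generated
 * tests in this patch. It shows how the renamed parameters map onto the
 * vluxseg8ei64.v instruction operands: vm is the v0 mask operand (formerly
 * "mask"), rs1 is the scalar base-address register (formerly "base"), and
 * rs2 is the 64-bit byte-offset index vector (formerly "bindex"). The
 * intrinsic signature is copied verbatim from the f32m1x8 hunk above; the
 * gather_f32() wrapper and its name are illustrative assumptions only.
 */
#include <riscv_vector.h>
#include <stddef.h>

typedef float float32_t;

/* Masked indexed-unordered segment-8 load: gathers 8-field segments of
   32-bit floats from base pointer rs1 at the byte offsets in rs2, under
   mask vm, for vl elements. f32 data at LMUL=1 pairs with a 64-bit index
   vector at LMUL=2 and a vbool32_t mask, matching the hunks above. */
vfloat32m1x8_t gather_f32(vbool32_t vm, const float32_t *rs1,
                          vuint64m2_t rs2, size_t vl) {
  return __riscv_vluxseg8ei64_v_f32m1x8_m(vm, rs1, rs2, vl);
}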
-vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf4x8_m(mask, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf4x8_m(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf2x8_m(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf2x8_m(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16m1x8_m(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16m1x8_m(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32mf2x8_m(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32mf2x8_m(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32m1x8_m(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32m1x8_m(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i64m1x8_m(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i64m1x8_m(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf8x8_m(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf8x8_m(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf4x8_m(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf4x8_m(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf2x8_m(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf2x8_m(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8m1x8_m(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8m1x8_m(vm, rs1, rs2, vl); } -vuint16mf4x8_t 
test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf4x8_m(mask, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf4x8_m(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf2x8_m(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf2x8_m(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16m1x8_m(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16m1x8_m(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32mf2x8_m(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32mf2x8_m(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32m1x8_m(mask, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32m1x8_m(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u64m1x8_m(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u64m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vluxseg8ei8.c b/auto-generated/gnu-api-tests/vluxseg8ei8.c index 6cbe712aa..0cac47211 100644 --- a/auto-generated/gnu-api-tests/vluxseg8ei8.c +++ b/auto-generated/gnu-api-tests/vluxseg8ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf4x8(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf4x8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf2x8(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf2x8(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16m1x8(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16m1x8(rs1, 
rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32mf2x8(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32mf2x8(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32m1x8(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32m1x8(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f64m1x8(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f64m1x8(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf8x8(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf8x8(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf4x8(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf4x8(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf2x8(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf2x8(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8m1x8(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8m1x8(rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf4x8(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf4x8(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf2x8(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf2x8(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16m1x8(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16m1x8(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32mf2x8(base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32mf2x8(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32m1x8(base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t 
*rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32m1x8(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i64m1x8(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i64m1x8(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf8x8(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf8x8(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf4x8(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf4x8(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf2x8(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf2x8(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8m1x8(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8m1x8(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf4x8(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf4x8(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf2x8(base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf2x8(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16m1x8(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16m1x8(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32mf2x8(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32mf2x8(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32m1x8(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32m1x8(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u64m1x8(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u64m1x8(rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - 
return __riscv_vluxseg8ei8_v_f16mf4x8_m(mask, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf4x8_m(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf2x8_m(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf2x8_m(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16m1x8_m(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16m1x8_m(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32mf2x8_m(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32mf2x8_m(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32m1x8_m(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32m1x8_m(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f64m1x8_m(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f64m1x8_m(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf8x8_m(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf8x8_m(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf4x8_m(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf4x8_m(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf2x8_m(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf2x8_m(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8m1x8_m(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8m1x8_m(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei8_v_i16mf4x8_m(mask, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf4x8_m(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf2x8_m(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf2x8_m(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16m1x8_m(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16m1x8_m(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32mf2x8_m(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32mf2x8_m(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32m1x8_m(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32m1x8_m(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i64m1x8_m(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i64m1x8_m(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf8x8_m(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf8x8_m(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf4x8_m(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf4x8_m(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf2x8_m(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf2x8_m(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8m1x8_m(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8m1x8_m(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf4x8_m(mask, base, bindex, 
vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf4x8_m(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf2x8_m(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf2x8_m(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16m1x8_m(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16m1x8_m(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32mf2x8_m(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32mf2x8_m(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32m1x8_m(mask, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32m1x8_m(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u64m1x8_m(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u64m1x8_m(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vmacc.c b/auto-generated/gnu-api-tests/vmacc.c index 5840eed36..5c2dbec5f 100644 --- a/auto-generated/gnu-api-tests/vmacc.c +++ b/auto-generated/gnu-api-tests/vmacc.c @@ -358,356 +358,356 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s return __riscv_vmacc_vx_u64m8(vd, rs1, vs2, vl); } -vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8mf8_m(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8mf8_m(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8mf8_m(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8mf8_m(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8mf4_m(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8mf4_m(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, 
vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8mf4_m(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8mf4_m(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8mf2_m(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8mf2_m(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8mf2_m(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8mf2_m(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m1_m(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m1_m(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m1_m(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m1_m(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m2_m(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m2_m(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m2_m(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m2_m(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m4_m(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m4_m(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m4_m(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m4_m(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m8_m(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m8_m(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m8_m(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m8_m(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, 
vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16mf4_m(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16mf4_m(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16mf4_m(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16mf4_m(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16mf2_m(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16mf2_m(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16mf2_m(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16mf2_m(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m1_m(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16m1_m(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m1_m(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m1_m(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m2_m(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16m2_m(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m2_m(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m2_m(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m4_m(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16m4_m(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m4_m(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m4_m(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m8_m(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) 
{ + return __riscv_vmacc_vv_i16m8_m(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m8_m(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m8_m(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32mf2_m(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32mf2_m(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32mf2_m(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32mf2_m(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m1_m(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m1_m(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m1_m(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m1_m(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m2_m(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m2_m(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m2_m(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m2_m(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m4_m(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m4_m(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m4_m(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m4_m(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m8_m(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m8_m(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m8_m(mask, vd, 
rs1, vs2, vl); +vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m8_m(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m1_m(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m1_m(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m1_m(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m1_m(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m2_m(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m2_m(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m2_m(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m2_m(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m4_m(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m4_m(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m4_m(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m4_m(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m8_m(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m8_m(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m8_m(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m8_m(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8mf8_m(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8mf8_m(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8mf8_m(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8mf8_m(vm, vd, rs1, vs2, vl); } -vuint8mf4_t 
test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8mf4_m(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8mf4_m(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8mf4_m(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8mf4_m(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8mf2_m(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8mf2_m(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8mf2_m(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8mf2_m(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8m1_m(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8m1_m(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8m1_m(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8m1_m(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8m2_m(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8m2_m(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8m2_m(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8m2_m(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8m4_m(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8m4_m(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8m4_m(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8m4_m(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8m8_m(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, 
vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8m8_m(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8m8_m(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8m8_m(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u16mf4_m(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u16mf4_m(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u16mf4_m(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16mf4_m(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u16mf2_m(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u16mf2_m(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u16mf2_m(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16mf2_m(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_u16m1_m(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_u16m1_m(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_u16m1_m(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16m1_m(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u16m2_m(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u16m2_m(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u16m2_m(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16m2_m(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u16m4_m(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u16m4_m(vm, vd, vs1, vs2, vl); } -vuint16m4_t 
test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u16m4_m(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16m4_m(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u16m8_m(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u16m8_m(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u16m8_m(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16m8_m(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32mf2_m(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32mf2_m(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32mf2_m(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32mf2_m(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m1_m(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m1_m(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32m1_m(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m1_m(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m2_m(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m2_m(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32m2_m(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m2_m(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m4_m(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m4_m(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return 
__riscv_vmacc_vx_u32m4_m(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m4_m(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m8_m(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m8_m(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32m8_m(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m8_m(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m1_m(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m1_m(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m1_m(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_u64m1_m(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m2_m(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m2_m(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m2_m(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u64m2_m(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m4_m(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m4_m(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m4_m(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u64m4_m(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m8_m(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m8_m(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m8_m(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t 
vl) { + return __riscv_vmacc_vx_u64m8_m(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-api-tests/vmadc.c b/auto-generated/gnu-api-tests/vmadc.c index f81af65fb..6b0499894 100644 --- a/auto-generated/gnu-api-tests/vmadc.c +++ b/auto-generated/gnu-api-tests/vmadc.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i8mf8_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vvm_i8mf8_b64(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i8mf8_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vxm_i8mf8_b64(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmadc_vv_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmadc_vv_i8mf8_b64(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc_vx_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc_vx_i8mf8_b64(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i8mf4_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vvm_i8mf4_b32(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i8mf4_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vxm_i8mf4_b32(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmadc_vv_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmadc_vv_i8mf4_b32(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc_vx_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc_vx_i8mf4_b32(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i8mf2_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vvm_i8mf2_b16(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i8mf2_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vxm_i8mf2_b16(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmadc_vv_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, 
size_t vl) { + return __riscv_vmadc_vv_i8mf2_b16(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc_vx_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc_vx_i8mf2_b16(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i8m1_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vvm_i8m1_b8(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i8m1_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vxm_i8m1_b8(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmadc_vv_i8m1_b8(op1, op2, vl); +vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmadc_vv_i8m1_b8(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc_vx_i8m1_b8(op1, op2, vl); +vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc_vx_i8m1_b8(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i8m2_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vvm_i8m2_b4(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i8m2_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vxm_i8m2_b4(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmadc_vv_i8m2_b4(op1, op2, vl); +vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmadc_vv_i8m2_b4(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc_vx_i8m2_b4(op1, op2, vl); +vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc_vx_i8m2_b4(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i8m4_b2(op1, op2, carryin, vl); +vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc_vvm_i8m4_b2(vs2, vs1, v0, vl); } -vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i8m4_b2(op1, op2, carryin, vl); +vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc_vxm_i8m4_b2(vs2, rs1, v0, vl); } -vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmadc_vv_i8m4_b2(op1, op2, vl); +vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmadc_vv_i8m4_b2(vs2, vs1, vl); } -vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc_vx_i8m4_b2(op1, op2, vl); +vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vmadc_vx_i8m4_b2(vs2, rs1, vl); } -vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i8m8_b1(op1, op2, carryin, vl); +vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmadc_vvm_i8m8_b1(vs2, vs1, v0, vl); } -vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i8m8_b1(op1, op2, carryin, vl); +vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmadc_vxm_i8m8_b1(vs2, rs1, v0, vl); } -vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmadc_vv_i8m8_b1(op1, op2, vl); +vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmadc_vv_i8m8_b1(vs2, vs1, vl); } -vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc_vx_i8m8_b1(op1, op2, vl); +vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc_vx_i8m8_b1(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i16mf4_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vvm_i16mf4_b64(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i16mf4_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vxm_i16mf4_b64(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmadc_vv_i16mf4_b64(op1, op2, vl); +vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmadc_vv_i16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc_vx_i16mf4_b64(op1, op2, vl); +vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc_vx_i16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i16mf2_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vvm_i16mf2_b32(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i16mf2_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vxm_i16mf2_b32(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmadc_vv_i16mf2_b32(op1, op2, vl); +vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmadc_vv_i16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc_vx_i16mf2_b32(op1, op2, vl); +vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc_vx_i16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t 
carryin, size_t vl) { - return __riscv_vmadc_vvm_i16m1_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vvm_i16m1_b16(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i16m1_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vxm_i16m1_b16(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmadc_vv_i16m1_b16(op1, op2, vl); +vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmadc_vv_i16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc_vx_i16m1_b16(op1, op2, vl); +vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc_vx_i16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i16m2_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vvm_i16m2_b8(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i16m2_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vxm_i16m2_b8(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmadc_vv_i16m2_b8(op1, op2, vl); +vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmadc_vv_i16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc_vx_i16m2_b8(op1, op2, vl); +vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc_vx_i16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i16m4_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vvm_i16m4_b4(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i16m4_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vxm_i16m4_b4(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmadc_vv_i16m4_b4(op1, op2, vl); +vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmadc_vv_i16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc_vx_i16m4_b4(op1, op2, vl); +vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc_vx_i16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i16m8_b2(op1, op2, carryin, vl); +vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + 
return __riscv_vmadc_vvm_i16m8_b2(vs2, vs1, v0, vl); } -vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i16m8_b2(op1, op2, carryin, vl); +vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc_vxm_i16m8_b2(vs2, rs1, v0, vl); } -vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmadc_vv_i16m8_b2(op1, op2, vl); +vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmadc_vv_i16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc_vx_i16m8_b2(op1, op2, vl); +vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc_vx_i16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i32mf2_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vvm_i32mf2_b64(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i32mf2_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vxm_i32mf2_b64(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmadc_vv_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmadc_vv_i32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc_vx_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc_vx_i32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i32m1_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vvm_i32m1_b32(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i32m1_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vxm_i32m1_b32(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmadc_vv_i32m1_b32(op1, op2, vl); +vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmadc_vv_i32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc_vx_i32m1_b32(op1, op2, vl); +vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc_vx_i32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i32m2_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vvm_i32m2_b16(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t 
op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i32m2_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vxm_i32m2_b16(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmadc_vv_i32m2_b16(op1, op2, vl); +vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmadc_vv_i32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc_vx_i32m2_b16(op1, op2, vl); +vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc_vx_i32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i32m4_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vvm_i32m4_b8(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i32m4_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vxm_i32m4_b8(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmadc_vv_i32m4_b8(op1, op2, vl); +vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmadc_vv_i32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc_vx_i32m4_b8(op1, op2, vl); +vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc_vx_i32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i32m8_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vvm_i32m8_b4(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i32m8_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vxm_i32m8_b4(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmadc_vv_i32m8_b4(op1, op2, vl); +vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmadc_vv_i32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc_vx_i32m8_b4(op1, op2, vl); +vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc_vx_i32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i64m1_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vvm_i64m1_b64(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i64m1_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t vs2, int64_t rs1, vbool64_t v0, 
size_t vl) { + return __riscv_vmadc_vxm_i64m1_b64(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmadc_vv_i64m1_b64(op1, op2, vl); +vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmadc_vv_i64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc_vx_i64m1_b64(op1, op2, vl); +vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc_vx_i64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i64m2_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vvm_i64m2_b32(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i64m2_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vxm_i64m2_b32(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmadc_vv_i64m2_b32(op1, op2, vl); +vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmadc_vv_i64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc_vx_i64m2_b32(op1, op2, vl); +vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc_vx_i64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i64m4_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vvm_i64m4_b16(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i64m4_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vxm_i64m4_b16(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmadc_vv_i64m4_b16(op1, op2, vl); +vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmadc_vv_i64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc_vx_i64m4_b16(op1, op2, vl); +vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc_vx_i64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vvm_i64m8_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vvm_i64m8_b8(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vxm_i64m8_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vxm_i64m8_b8(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, 
size_t vl) { - return __riscv_vmadc_vv_i64m8_b8(op1, op2, vl); +vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmadc_vv_i64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc_vx_i64m8_b8(op1, op2, vl); +vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc_vx_i64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u8mf8_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vvm_u8mf8_b64(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u8mf8_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vxm_u8mf8_b64(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmadc_vv_u8mf8_b64(op1, op2, vl); +vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmadc_vv_u8mf8_b64(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc_vx_u8mf8_b64(op1, op2, vl); +vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc_vx_u8mf8_b64(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u8mf4_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vvm_u8mf4_b32(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u8mf4_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vxm_u8mf4_b32(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmadc_vv_u8mf4_b32(op1, op2, vl); +vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmadc_vv_u8mf4_b32(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc_vx_u8mf4_b32(op1, op2, vl); +vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc_vx_u8mf4_b32(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u8mf2_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vvm_u8mf2_b16(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u8mf2_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vxm_u8mf2_b16(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmadc_vv_u8mf2_b16(op1, op2, vl); +vbool16_t 
test_vmadc_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmadc_vv_u8mf2_b16(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc_vx_u8mf2_b16(op1, op2, vl); +vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc_vx_u8mf2_b16(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u8m1_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vvm_u8m1_b8(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u8m1_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vxm_u8m1_b8(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmadc_vv_u8m1_b8(op1, op2, vl); +vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmadc_vv_u8m1_b8(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc_vx_u8m1_b8(op1, op2, vl); +vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc_vx_u8m1_b8(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u8m2_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vvm_u8m2_b4(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u8m2_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vxm_u8m2_b4(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmadc_vv_u8m2_b4(op1, op2, vl); +vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmadc_vv_u8m2_b4(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc_vx_u8m2_b4(op1, op2, vl); +vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc_vx_u8m2_b4(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u8m4_b2(op1, op2, carryin, vl); +vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc_vvm_u8m4_b2(vs2, vs1, v0, vl); } -vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u8m4_b2(op1, op2, carryin, vl); +vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc_vxm_u8m4_b2(vs2, rs1, v0, vl); } -vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmadc_vv_u8m4_b2(op1, op2, vl); +vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmadc_vv_u8m4_b2(vs2, vs1, vl); } -vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc_vx_u8m4_b2(op1, op2, 
vl); +vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc_vx_u8m4_b2(vs2, rs1, vl); } -vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u8m8_b1(op1, op2, carryin, vl); +vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmadc_vvm_u8m8_b1(vs2, vs1, v0, vl); } -vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u8m8_b1(op1, op2, carryin, vl); +vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmadc_vxm_u8m8_b1(vs2, rs1, v0, vl); } -vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmadc_vv_u8m8_b1(op1, op2, vl); +vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmadc_vv_u8m8_b1(vs2, vs1, vl); } -vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc_vx_u8m8_b1(op1, op2, vl); +vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc_vx_u8m8_b1(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u16mf4_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vvm_u16mf4_b64(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u16mf4_b64(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc_vxm_u16mf4_b64(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmadc_vv_u16mf4_b64(op1, op2, vl); +vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmadc_vv_u16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc_vx_u16mf4_b64(op1, op2, vl); +vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc_vx_u16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u16mf2_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vvm_u16mf2_b32(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u16mf2_b32(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc_vxm_u16mf2_b32(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmadc_vv_u16mf2_b32(op1, op2, vl); +vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmadc_vv_u16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc_vx_u16mf2_b32(op1, op2, vl); +vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + 
return __riscv_vmadc_vx_u16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u16m1_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vvm_u16m1_b16(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u16m1_b16(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc_vxm_u16m1_b16(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmadc_vv_u16m1_b16(op1, op2, vl); +vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmadc_vv_u16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc_vx_u16m1_b16(op1, op2, vl); +vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc_vx_u16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u16m2_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vvm_u16m2_b8(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u16m2_b8(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc_vxm_u16m2_b8(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmadc_vv_u16m2_b8(op1, op2, vl); +vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmadc_vv_u16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc_vx_u16m2_b8(op1, op2, vl); +vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc_vx_u16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vvm_u16m4_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vvm_u16m4_b4(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc_vxm_u16m4_b4(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc_vxm_u16m4_b4(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmadc_vv_u16m4_b4(op1, op2, vl); +vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmadc_vv_u16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc_vx_u16m4_b4(op1, op2, vl); +vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc_vx_u16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t 
carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u16m8_b2(op1, op2, carryin, vl);
+vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u16m8_b2(vs2, vs1, v0, vl);
 }

-vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u16m8_b2(op1, op2, carryin, vl);
+vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u16m8_b2(vs2, rs1, v0, vl);
 }

-vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u16m8_b2(vs2, vs1, vl);
 }

-vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u16m8_b2(vs2, rs1, vl);
 }

-vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u32mf2_b64(op1, op2, carryin, vl);
+vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u32mf2_b64(vs2, vs1, v0, vl);
 }

-vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u32mf2_b64(op1, op2, carryin, vl);
+vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u32mf2_b64(vs2, rs1, v0, vl);
 }

-vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u32mf2_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u32mf2_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u32m1_b32(op1, op2, carryin, vl);
+vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u32m1_b32(vs2, vs1, v0, vl);
 }

-vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u32m1_b32(op1, op2, carryin, vl);
+vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u32m1_b32(vs2, rs1, v0, vl);
 }

-vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u32m1_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u32m1_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u32m2_b16(op1, op2, carryin, vl);
+vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u32m2_b16(vs2, vs1, v0, vl);
 }

-vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u32m2_b16(op1, op2, carryin, vl);
+vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u32m2_b16(vs2, rs1, v0, vl);
 }

-vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u32m2_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u32m2_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u32m4_b8(op1, op2, carryin, vl);
+vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u32m4_b8(vs2, vs1, v0, vl);
 }

-vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u32m4_b8(op1, op2, carryin, vl);
+vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u32m4_b8(vs2, rs1, v0, vl);
 }

-vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u32m4_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u32m4_b8(vs2, rs1, vl);
 }

-vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u32m8_b4(op1, op2, carryin, vl);
+vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u32m8_b4(vs2, vs1, v0, vl);
 }

-vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u32m8_b4(op1, op2, carryin, vl);
+vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u32m8_b4(vs2, rs1, v0, vl);
 }

-vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u32m8_b4(vs2, vs1, vl);
 }

-vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u32m8_b4(vs2, rs1, vl);
 }

-vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u64m1_b64(op1, op2, carryin, vl);
+vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u64m1_b64(vs2, vs1, v0, vl);
 }

-vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u64m1_b64(op1, op2, carryin, vl);
+vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u64m1_b64(vs2, rs1, v0, vl);
 }

-vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u64m1_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u64m1_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u64m2_b32(op1, op2, carryin, vl);
+vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u64m2_b32(vs2, vs1, v0, vl);
 }

-vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u64m2_b32(op1, op2, carryin, vl);
+vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u64m2_b32(vs2, rs1, v0, vl);
 }

-vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u64m2_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u64m2_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u64m4_b16(op1, op2, carryin, vl);
+vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u64m4_b16(vs2, vs1, v0, vl);
 }

-vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u64m4_b16(op1, op2, carryin, vl);
+vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u64m4_b16(vs2, rs1, v0, vl);
 }

-vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u64m4_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u64m4_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vmadc_vvm_u64m8_b8(op1, op2, carryin, vl);
+vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmadc_vvm_u64m8_b8(vs2, vs1, v0, vl);
 }

-vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vmadc_vxm_u64m8_b8(op1, op2, carryin, vl);
+vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmadc_vxm_u64m8_b8(vs2, rs1, v0, vl);
 }

-vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmadc_vv_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmadc_vv_u64m8_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmadc_vx_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmadc_vx_u64m8_b8(vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmadc\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vmadd.c b/auto-generated/gnu-api-tests/vmadd.c
index b8b30b2ac..7af21747d 100644
--- a/auto-generated/gnu-api-tests/vmadd.c
+++ b/auto-generated/gnu-api-tests/vmadd.c
@@ -358,356 +358,356 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s
   return __riscv_vmadd_vx_u64m8(vd, rs1, vs2, vl);
 }

-vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vmadd_vv_i8mf8_m(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vmadd_vv_i8mf8_m(vm, vd, vs1, vs2, vl);
 }

-vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vmadd_vx_i8mf8_m(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vmadd_vx_i8mf8_m(vm, vd, rs1, vs2, vl);
 }

-vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vmadd_vv_i8mf4_m(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vmadd_vv_i8mf4_m(vm, vd, vs1, vs2, vl);
 }

-vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vmadd_vx_i8mf4_m(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vmadd_vx_i8mf4_m(vm, vd, rs1, vs2, vl);
 }

-vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vmadd_vv_i8mf2_m(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vmadd_vv_i8mf2_m(vm, vd, vs1, vs2, vl);
 }

-vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vmadd_vx_i8mf2_m(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vmadd_vx_i8mf2_m(vm, vd, rs1, vs2, vl);
 }

-vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vmadd_vv_i8m1_m(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vmadd_vv_i8m1_m(vm, vd, vs1,
vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m1_m(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m1_m(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m2_m(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m2_m(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m2_m(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m2_m(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m4_m(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m4_m(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m4_m(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m4_m(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m8_m(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m8_m(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m8_m(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m8_m(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf4_m(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf4_m(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf4_m(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16mf4_m(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf2_m(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf2_m(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf2_m(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t 
vl) { + return __riscv_vmadd_vx_i16mf2_m(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m1_m(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m1_m(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m1_m(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m1_m(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m2_m(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m2_m(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m2_m(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m2_m(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m4_m(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m4_m(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m4_m(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m4_m(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m8_m(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m8_m(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m8_m(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m8_m(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32mf2_m(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32mf2_m(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32mf2_m(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32mf2_m(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return 
__riscv_vmadd_vv_i32m1_m(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m1_m(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m1_m(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m1_m(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m2_m(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m2_m(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m2_m(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m2_m(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m4_m(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m4_m(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m4_m(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m4_m(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m8_m(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m8_m(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m8_m(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m8_m(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m1_m(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m1_m(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m1_m(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m1_m(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m2_m(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m2_m(vm, vd, vs1, vs2, vl); } -vint64m2_t 
test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m2_m(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m2_m(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m4_m(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m4_m(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m4_m(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m4_m(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m8_m(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m8_m(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m8_m(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m8_m(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf8_m(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf8_m(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf8_m(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf8_m(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf4_m(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf4_m(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf4_m(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf4_m(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf2_m(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf2_m(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf2_m(mask, vd, rs1, vs2, vl); +vuint8mf2_t 
test_vmadd_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf2_m(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m1_m(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m1_m(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m1_m(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m1_m(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m2_m(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m2_m(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m2_m(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m2_m(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m4_m(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m4_m(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m4_m(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m4_m(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m8_m(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m8_m(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m8_m(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m8_m(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf4_m(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf4_m(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf4_m(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf4_m(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, 
vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf2_m(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf2_m(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf2_m(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf2_m(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m1_m(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m1_m(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m1_m(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m1_m(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m2_m(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m2_m(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m2_m(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m2_m(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m4_m(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m4_m(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m4_m(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m4_m(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m8_m(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m8_m(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m8_m(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m8_m(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32mf2_m(mask, vd, vs1, vs2, vl); +vuint32mf2_t 
test_vmadd_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32mf2_m(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32mf2_m(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32mf2_m(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m1_m(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m1_m(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m1_m(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m1_m(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m2_m(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m2_m(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m2_m(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m2_m(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m4_m(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m4_m(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m4_m(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m4_m(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m8_m(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m8_m(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m8_m(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m8_m(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m1_m(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m1_m(vm, 
vd, vs1, vs2, vl);
 }

-vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vmadd_vx_u64m1_m(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vmadd_vx_u64m1_m(vm, vd, rs1, vs2, vl);
 }

-vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmadd_vv_u64m2_m(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmadd_vv_u64m2_m(vm, vd, vs1, vs2, vl);
 }

-vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmadd_vx_u64m2_m(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmadd_vx_u64m2_m(vm, vd, rs1, vs2, vl);
 }

-vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmadd_vv_u64m4_m(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmadd_vv_u64m4_m(vm, vd, vs1, vs2, vl);
 }

-vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmadd_vx_u64m4_m(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmadd_vx_u64m4_m(vm, vd, rs1, vs2, vl);
 }

-vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmadd_vv_u64m8_m(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmadd_vv_u64m8_m(vm, vd, vs1, vs2, vl);
 }

-vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmadd_vx_u64m8_m(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmadd_vx_u64m8_m(vm, vd, rs1, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vmand.c b/auto-generated/gnu-api-tests/vmand.c
index a484c0495..e96eee0a4 100644
--- a/auto-generated/gnu-api-tests/vmand.c
+++ b/auto-generated/gnu-api-tests/vmand.c
@@ -6,32 +6,32 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
-  return __riscv_vmand_mm_b1(op1, op2, vl);
+vbool1_t test_vmand_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) {
+  return __riscv_vmand_mm_b1(vs2, vs1, vl);
 }

-vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
-  return __riscv_vmand_mm_b2(op1, op2, vl);
+vbool2_t test_vmand_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vmand_mm_b2(vs2, vs1, vl);
 }

-vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
-  return __riscv_vmand_mm_b4(op1, op2, vl);
+vbool4_t test_vmand_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vmand_mm_b4(vs2, vs1, vl);
 }

-vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
-  return __riscv_vmand_mm_b8(op1, op2, vl);
+vbool8_t test_vmand_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vmand_mm_b8(vs2, vs1, vl);
 }

-vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
-  return __riscv_vmand_mm_b16(op1, op2, vl);
+vbool16_t test_vmand_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vmand_mm_b16(vs2, vs1, vl);
 }

-vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
-  return __riscv_vmand_mm_b32(op1, op2, vl);
+vbool32_t test_vmand_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vmand_mm_b32(vs2, vs1, vl);
 }

-vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
-  return __riscv_vmand_mm_b64(op1, op2, vl);
+vbool64_t test_vmand_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vmand_mm_b64(vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmand\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/gnu-api-tests/vmandn.c b/auto-generated/gnu-api-tests/vmandn.c
index 8304bfc99..a345093d7 100644
--- a/auto-generated/gnu-api-tests/vmandn.c
+++ b/auto-generated/gnu-api-tests/vmandn.c
@@ -6,32 +6,32 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
-  return __riscv_vmandn_mm_b1(op1, op2, vl);
+vbool1_t test_vmandn_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) {
+  return __riscv_vmandn_mm_b1(vs2, vs1, vl);
 }

-vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
-  return __riscv_vmandn_mm_b2(op1, op2, vl);
+vbool2_t test_vmandn_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vmandn_mm_b2(vs2, vs1, vl);
 }

-vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
-  return __riscv_vmandn_mm_b4(op1, op2, vl);
+vbool4_t test_vmandn_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vmandn_mm_b4(vs2, vs1, vl);
 }

-vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
-  return __riscv_vmandn_mm_b8(op1, op2, vl);
+vbool8_t test_vmandn_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vmandn_mm_b8(vs2, vs1, vl);
 }

-vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
-  return __riscv_vmandn_mm_b16(op1, op2, vl);
+vbool16_t test_vmandn_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vmandn_mm_b16(vs2, vs1, vl);
 }

-vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
-  return __riscv_vmandn_mm_b32(op1, op2, vl);
+vbool32_t test_vmandn_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vmandn_mm_b32(vs2, vs1, vl);
 }

-vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
-  return __riscv_vmandn_mm_b64(op1, op2, vl);
+vbool64_t test_vmandn_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vmandn_mm_b64(vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmandn\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/gnu-api-tests/vmax.c b/auto-generated/gnu-api-tests/vmax.c
index 2c5cfd754..1e3e1532b 100644
--- a/auto-generated/gnu-api-tests/vmax.c
+++ b/auto-generated/gnu-api-tests/vmax.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmax_vv_i8mf8(op1, op2, vl);
+vint8mf8_t
test_vmax_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax_vv_i8m1(op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m1(op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax_vv_i8m2(op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m2(op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax_vv_i8m4(op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m4(op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax_vv_i8m8(op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m8(op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf4(op1, 
op2, vl); +vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax_vv_i16m1(op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m1(op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax_vv_i16m2(op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m2(op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax_vv_i16m4(op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m4(op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmax_vv_i16m8(op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m8(op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmax_vv_i32m1(op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m1(op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vmax_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmax_vv_i32m2(op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m2(op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmax_vv_i32m4(op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m4(op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmax_vv_i32m8(op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m8(op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmax_vv_i64m1(op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m1(op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmax_vv_i64m2(op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m2(op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmax_vv_i64m4(op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m4(op1, op2, vl); +vint64m4_t test_vmax_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmax_vv_i64m8(op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m8(op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m8(vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return 
__riscv_vmax_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return 
__riscv_vmax_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmax_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m8_m(vm, vs2, vs1, vl); } 
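/* Illustrative sketch, not part of the auto-generated patch: the rename this
   diff applies maps mask -> vm, op1 -> vs2, and op2 -> vs1 (vector operand)
   or rs1 (scalar operand), mirroring the register roles of the underlying
   instructions. A minimal, hypothetical use of one renamed masked intrinsic
   (assumes <riscv_vector.h>; clamp_nonneg_i16m8 is an invented helper): */
#include <riscv_vector.h>

static inline vint16m8_t clamp_nonneg_i16m8(vbool2_t vm, vint16m8_t vs2, size_t vl) {
  /* max(vs2, 0) on lanes selected by vm; the scalar 0 occupies rs1's slot. */
  return __riscv_vmax_vx_i16m8_m(vm, vs2, 0, vl);
}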
-vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vmax_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i16m8_m(vm, vs2, rs1, vl);
 }

-vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmax_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i32mf2_m(vm, vs2, vs1, vl);
 }

-vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i32mf2_m(vm, vs2, rs1, vl);
 }

-vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmax_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vmax_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i32m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vmax_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i32m1_m(vm, vs2, rs1, vl);
 }

-vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmax_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vmax_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i32m2_m(vm, vs2, vs1, vl);
 }

-vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vmax_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i32m2_m(vm, vs2, rs1, vl);
 }

-vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmax_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vmax_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i32m4_m(vm, vs2, vs1, vl);
 }

-vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vmax_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i32m4_m(vm, vs2, rs1, vl);
 }

-vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmax_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmax_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i32m8_m(vm, vs2, vs1, vl);
 }

-vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmax_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i32m8_m(vm, vs2, rs1, vl);
 }

-vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmax_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmax_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i64m1_m(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask,
-  return __riscv_vmax_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmax_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m1_m(vm, vs2, rs1, vl);
 }

-vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmax_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vmax_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i64m2_m(vm, vs2, vs1, vl);
 }

-vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vmax_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m2_m(vm, vs2, rs1, vl);
 }

-vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmax_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vmax_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i64m4_m(vm, vs2, vs1, vl);
 }

-vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vmax_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m4_m(vm, vs2, rs1, vl);
 }

-vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmax_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vmax_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i64m8_m(vm, vs2, vs1, vl);
 }

-vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vmax_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m8_m(vm, vs2, rs1, vl);
 }

/* { dg-final { scan-assembler-times {vmax\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmaxu.c b/auto-generated/gnu-api-tests/vmaxu.c
index d0d35853a..9e92b3d05 100644
--- a/auto-generated/gnu-api-tests/vmaxu.c
+++ b/auto-generated/gnu-api-tests/vmaxu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmaxu_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmaxu_vv_u8mf8(vs2, vs1, vl);
 }

-vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_vx_u8mf8(vs2, rs1, vl);
 }

-vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmaxu_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmaxu_vv_u8mf4(vs2, vs1, vl);
 }

-vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_vx_u8mf4(vs2, rs1, vl);
 }

-vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmaxu_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf2(op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m1(op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m1(op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m2(op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m2(op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m4(op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m4(op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m8(op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return 
__riscv_vmaxu_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + 
return __riscv_vmaxu_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return 
__riscv_vmaxu_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf4_m(mask, op1, 
op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t 
op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m1_m(vm, vs2, rs1, vl); 
} -vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmaxu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vmerge.c b/auto-generated/gnu-api-tests/vmerge.c index b37861d16..44f5e5812 100644 --- a/auto-generated/gnu-api-tests/vmerge.c +++ b/auto-generated/gnu-api-tests/vmerge.c @@ -6,416 +6,416 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8mf8(op1, op2, mask, vl); +vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8mf8(vs2, vs1, v0, vl); } -vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8mf8(op1, op2, mask, vl); +vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8mf8(vs2, rs1, v0, vl); } -vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8mf4(op1, op2, mask, vl); +vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8mf4(vs2, vs1, v0, vl); } -vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8mf4(op1, op2, mask, vl); +vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8mf4(vs2, rs1, v0, vl); } -vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8mf2(op1, op2, mask, vl); +vint8mf2_t 
test_vmerge_vvm_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8mf2(vs2, vs1, v0, vl); } -vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8mf2(op1, op2, mask, vl); +vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8mf2(vs2, rs1, v0, vl); } -vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8m1(op1, op2, mask, vl); +vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8m1(vs2, vs1, v0, vl); } -vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8m1(op1, op2, mask, vl); +vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m1(vs2, rs1, v0, vl); } -vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8m2(op1, op2, mask, vl); +vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8m2(vs2, vs1, v0, vl); } -vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8m2(op1, op2, mask, vl); +vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m2(vs2, rs1, v0, vl); } -vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8m4(op1, op2, mask, vl); +vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8m4(vs2, vs1, v0, vl); } -vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8m4(op1, op2, mask, vl); +vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m4(vs2, rs1, v0, vl); } -vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8m8(op1, op2, mask, vl); +vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8m8(vs2, vs1, v0, vl); } -vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8m8(op1, op2, mask, vl); +vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m8(vs2, rs1, v0, vl); } -vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16mf4(op1, op2, mask, vl); +vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_i16mf4(vs2, vs1, v0, vl); } -vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16mf4(op1, op2, mask, vl); +vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_i16mf4(vs2, rs1, v0, vl); } -vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16mf2(op1, op2, mask, vl); +vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + 
return __riscv_vmerge_vvm_i16mf2(vs2, vs1, v0, vl); } -vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16mf2(op1, op2, mask, vl); +vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_i16mf2(vs2, rs1, v0, vl); } -vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16m1(op1, op2, mask, vl); +vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_i16m1(vs2, vs1, v0, vl); } -vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16m1(op1, op2, mask, vl); +vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_i16m1(vs2, rs1, v0, vl); } -vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16m2(op1, op2, mask, vl); +vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_i16m2(vs2, vs1, v0, vl); } -vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16m2(op1, op2, mask, vl); +vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_i16m2(vs2, rs1, v0, vl); } -vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16m4(op1, op2, mask, vl); +vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_i16m4(vs2, vs1, v0, vl); } -vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16m4(op1, op2, mask, vl); +vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vxm_i16m4(vs2, rs1, v0, vl); } -vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16m8(op1, op2, mask, vl); +vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_i16m8(vs2, vs1, v0, vl); } -vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16m8(op1, op2, mask, vl); +vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vxm_i16m8(vs2, rs1, v0, vl); } -vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_i32mf2(op1, op2, mask, vl); +vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_i32mf2(vs2, vs1, v0, vl); } -vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_i32mf2(op1, op2, mask, vl); +vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_i32mf2(vs2, rs1, v0, vl); } -vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_i32m1(op1, op2, mask, vl); +vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + 
return __riscv_vmerge_vvm_i32m1(vs2, vs1, v0, vl); } -vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_i32m1(op1, op2, mask, vl); +vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_i32m1(vs2, rs1, v0, vl); } -vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_i32m2(op1, op2, mask, vl); +vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_i32m2(vs2, vs1, v0, vl); } -vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_i32m2(op1, op2, mask, vl); +vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_i32m2(vs2, rs1, v0, vl); } -vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_i32m4(op1, op2, mask, vl); +vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_i32m4(vs2, vs1, v0, vl); } -vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_i32m4(op1, op2, mask, vl); +vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_i32m4(vs2, rs1, v0, vl); } -vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_i32m8(op1, op2, mask, vl); +vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_i32m8(vs2, vs1, v0, vl); } -vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vxm_i32m8(op1, op2, mask, vl); +vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vxm_i32m8(vs2, rs1, v0, vl); } -vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_i64m1(op1, op2, mask, vl); +vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_i64m1(vs2, vs1, v0, vl); } -vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_i64m1(op1, op2, mask, vl); +vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_i64m1(vs2, rs1, v0, vl); } -vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_i64m2(op1, op2, mask, vl); +vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_i64m2(vs2, vs1, v0, vl); } -vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_i64m2(op1, op2, mask, vl); +vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_i64m2(vs2, rs1, v0, vl); } -vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_i64m4(op1, op2, mask, vl); +vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return 
__riscv_vmerge_vvm_i64m4(vs2, vs1, v0, vl); } -vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_i64m4(op1, op2, mask, vl); +vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_i64m4(vs2, rs1, v0, vl); } -vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_i64m8(op1, op2, mask, vl); +vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_i64m8(vs2, vs1, v0, vl); } -vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_i64m8(op1, op2, mask, vl); +vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_i64m8(vs2, rs1, v0, vl); } -vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_u8mf8(op1, op2, mask, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_u8mf8(vs2, vs1, v0, vl); } -vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_u8mf8(op1, op2, mask, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_u8mf8(vs2, rs1, v0, vl); } -vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_u8mf4(op1, op2, mask, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_u8mf4(vs2, vs1, v0, vl); } -vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_u8mf4(op1, op2, mask, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_u8mf4(vs2, rs1, v0, vl); } -vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_u8mf2(op1, op2, mask, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_u8mf2(vs2, vs1, v0, vl); } -vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_u8mf2(op1, op2, mask, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_u8mf2(vs2, rs1, v0, vl); } -vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_u8m1(op1, op2, mask, vl); +vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_u8m1(vs2, vs1, v0, vl); } -vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_u8m1(op1, op2, mask, vl); +vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_u8m1(vs2, rs1, v0, vl); } -vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_u8m2(op1, op2, mask, vl); +vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return 
__riscv_vmerge_vvm_u8m2(vs2, vs1, v0, vl); } -vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vxm_u8m2(op1, op2, mask, vl); +vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vxm_u8m2(vs2, rs1, v0, vl); } -vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vvm_u8m4(op1, op2, mask, vl); +vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_u8m4(vs2, vs1, v0, vl); } -vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vxm_u8m4(op1, op2, mask, vl); +vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vxm_u8m4(vs2, rs1, v0, vl); } -vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_vvm_u8m8(op1, op2, mask, vl); +vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_vvm_u8m8(vs2, vs1, v0, vl); } -vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_vxm_u8m8(op1, op2, mask, vl); +vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_vxm_u8m8(vs2, rs1, v0, vl); } -vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_u16mf4(op1, op2, mask, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_u16mf4(vs2, vs1, v0, vl); } -vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_u16mf4(op1, op2, mask, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_u16mf4(vs2, rs1, v0, vl); } -vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_u16mf2(op1, op2, mask, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_u16mf2(vs2, vs1, v0, vl); } -vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_u16mf2(op1, op2, mask, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_u16mf2(vs2, rs1, v0, vl); } -vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_u16m1(op1, op2, mask, vl); +vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_u16m1(vs2, vs1, v0, vl); } -vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_u16m1(op1, op2, mask, vl); +vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_u16m1(vs2, rs1, v0, vl); } -vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_u16m2(op1, op2, mask, vl); +vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, 
vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_u16m2(vs2, vs1, v0, vl); } -vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_u16m2(op1, op2, mask, vl); +vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_u16m2(vs2, rs1, v0, vl); } -vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_u16m4(op1, op2, mask, vl); +vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_u16m4(vs2, vs1, v0, vl); } -vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vxm_u16m4(op1, op2, mask, vl); +vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vxm_u16m4(vs2, rs1, v0, vl); } -vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vvm_u16m8(op1, op2, mask, vl); +vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_u16m8(vs2, vs1, v0, vl); } -vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vxm_u16m8(op1, op2, mask, vl); +vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vxm_u16m8(vs2, rs1, v0, vl); } -vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_u32mf2(op1, op2, mask, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_u32mf2(vs2, vs1, v0, vl); } -vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_u32mf2(op1, op2, mask, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_u32mf2(vs2, rs1, v0, vl); } -vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_u32m1(op1, op2, mask, vl); +vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_u32m1(vs2, vs1, v0, vl); } -vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_u32m1(op1, op2, mask, vl); +vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_u32m1(vs2, rs1, v0, vl); } -vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_u32m2(op1, op2, mask, vl); +vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_u32m2(vs2, vs1, v0, vl); } -vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_u32m2(op1, op2, mask, vl); +vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_u32m2(vs2, rs1, v0, vl); } -vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_u32m4(op1, op2, mask, vl); 
+vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_u32m4(vs2, vs1, v0, vl); } -vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_u32m4(op1, op2, mask, vl); +vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_u32m4(vs2, rs1, v0, vl); } -vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_u32m8(op1, op2, mask, vl); +vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_u32m8(vs2, vs1, v0, vl); } -vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vxm_u32m8(op1, op2, mask, vl); +vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vxm_u32m8(vs2, rs1, v0, vl); } -vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_u64m1(op1, op2, mask, vl); +vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_u64m1(vs2, vs1, v0, vl); } -vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_u64m1(op1, op2, mask, vl); +vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_u64m1(vs2, rs1, v0, vl); } -vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_u64m2(op1, op2, mask, vl); +vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_u64m2(vs2, vs1, v0, vl); } -vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_u64m2(op1, op2, mask, vl); +vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vxm_u64m2(vs2, rs1, v0, vl); } -vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_u64m4(op1, op2, mask, vl); +vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_u64m4(vs2, vs1, v0, vl); } -vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vxm_u64m4(op1, op2, mask, vl); +vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vxm_u64m4(vs2, rs1, v0, vl); } -vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_u64m8(op1, op2, mask, vl); +vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_u64m8(vs2, vs1, v0, vl); } -vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vxm_u64m8(op1, op2, mask, vl); +vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_u64m8(vs2, rs1, v0, vl); } -vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { - 
return __riscv_vmerge_vvm_f16mf4(op1, op2, mask, vl); +vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_f16mf4(vs2, vs1, v0, vl); } -vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_f16mf2(op1, op2, mask, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_f16mf2(vs2, vs1, v0, vl); } -vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_f16m1(op1, op2, mask, vl); +vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_f16m1(vs2, vs1, v0, vl); } -vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_f16m2(op1, op2, mask, vl); +vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_f16m2(vs2, vs1, v0, vl); } -vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_f16m4(op1, op2, mask, vl); +vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_f16m4(vs2, vs1, v0, vl); } -vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vvm_f16m8(op1, op2, mask, vl); +vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_f16m8(vs2, vs1, v0, vl); } -vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_f32mf2(op1, op2, mask, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_f32mf2(vs2, vs1, v0, vl); } -vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_f32m1(op1, op2, mask, vl); +vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_f32m1(vs2, vs1, v0, vl); } -vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_f32m2(op1, op2, mask, vl); +vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_f32m2(vs2, vs1, v0, vl); } -vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_f32m4(op1, op2, mask, vl); +vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_f32m4(vs2, vs1, v0, vl); } -vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_f32m8(op1, op2, mask, vl); +vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_f32m8(vs2, vs1, v0, vl); } -vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_f64m1(op1, op2, mask, vl); +vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, 
vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_f64m1(vs2, vs1, v0, vl); } -vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_f64m2(op1, op2, mask, vl); +vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_f64m2(vs2, vs1, v0, vl); } -vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_vvm_f64m4(op1, op2, mask, vl); +vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_f64m4(vs2, vs1, v0, vl); } -vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_vvm_f64m8(op1, op2, mask, vl); +vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_f64m8(vs2, vs1, v0, vl); } /* { dg-final { scan-assembler-times {vmerge\.[ivxfswum.]+\s+} 103 } } */ diff --git a/auto-generated/gnu-api-tests/vmfeq.c b/auto-generated/gnu-api-tests/vmfeq.c index dbafe3129..1c97ac78a 100644 --- a/auto-generated/gnu-api-tests/vmfeq.c +++ b/auto-generated/gnu-api-tests/vmfeq.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m2_b8(vs2, rs1, 
vl); } -vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m8_b2(op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m8_b2(op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32mf2_b64(op1, op2, vl); +vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32mf2_b64(op1, op2, vl); +vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m1_b32(op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32m1_b32(op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m2_b16(op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32m2_b16(op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m4_b8(op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32m4_b8(op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m8_b4(op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t 
vl) { - return __riscv_vmfeq_vf_f32m8_b4(op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m1_b64(op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m1_b64(op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m2_b32(op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m2_b32(op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m4_b16(op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m4_b16(op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m8_b8(op1, op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m8_b8(op1, op2, vl); +vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, 
size_t vl) { + return __riscv_vmfeq_vf_f16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t 
test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return 
__riscv_vmfeq_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfeq_vv_f64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq_vf_f64m8_b8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfeq\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vmfge.c b/auto-generated/gnu-api-tests/vmfge.c index 55cec819f..6fff46b11 100644 --- a/auto-generated/gnu-api-tests/vmfge.c +++ b/auto-generated/gnu-api-tests/vmfge.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfge_vv_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfge_vv_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t 
op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m8_b2(op1, op2, vl); +vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m8_b2(op1, op2, vl); +vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfge_vv_f32mf2_b64(op1, op2, vl); +vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32mf2_b64(op1, op2, vl); +vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m1_b32(op1, op2, vl); +vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m1_b32(op1, op2, vl); +vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m2_b16(op1, op2, vl); +vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m2_b16(op1, op2, vl); +vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m4_b8(op1, op2, vl); +vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m4_b8(op1, op2, vl); +vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m8_b4(op1, op2, vl); 
+vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m8_b4(op1, op2, vl); +vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m1_b64(op1, op2, vl); +vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m1_b64(op1, op2, vl); +vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m2_b32(op1, op2, vl); +vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m2_b32(op1, op2, vl); +vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m4_b16(op1, op2, vl); +vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m4_b16(op1, op2, vl); +vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m8_b8(op1, op2, vl); +vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m8_b8(op1, op2, vl); +vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfge_vv_f16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfge_vv_f16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t 
test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfge_vv_f16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge_vf_f16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfge_vv_f32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - 
return __riscv_vmfge_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfge_vv_f32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge_vf_f32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t 
test_vmfge_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfge_vv_f64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge_vf_f64m8_b8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfge\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vmfgt.c b/auto-generated/gnu-api-tests/vmfgt.c index 0548be608..b6e2b750a 100644 --- a/auto-generated/gnu-api-tests/vmfgt.c +++ b/auto-generated/gnu-api-tests/vmfgt.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t op1, 
vfloat16m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m8_b2(op1, op2, vl); +vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m8_b2(op1, op2, vl); +vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32mf2_b64(op1, op2, vl); +vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32mf2_b64(op1, op2, vl); +vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m1_b32(op1, op2, vl); +vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m1_b32(op1, op2, vl); +vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m2_b16(op1, op2, vl); +vbool16_t test_vmfgt_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m2_b16(op1, op2, vl); +vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m4_b8(op1, op2, vl); +vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m4_b8(op1, op2, vl); 
+vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m8_b4(op1, op2, vl); +vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m8_b4(op1, op2, vl); +vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m1_b64(op1, op2, vl); +vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m1_b64(op1, op2, vl); +vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m2_b32(op1, op2, vl); +vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m2_b32(op1, op2, vl); +vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m4_b16(op1, op2, vl); +vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m4_b16(op1, op2, vl); +vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m8_b8(op1, op2, vl); +vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m8_b8(op1, op2, vl); +vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - 
return __riscv_vmfgt_vv_f16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32mf2_b64_m(mask, op1, op2, vl); +vbool64_t 
test_vmfgt_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + 
return __riscv_vmfgt_vv_f64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfgt_vv_f64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt_vf_f64m8_b8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfgt\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vmfle.c b/auto-generated/gnu-api-tests/vmfle.c index f66a94bee..7221af60b 100644 --- a/auto-generated/gnu-api-tests/vmfle.c +++ b/auto-generated/gnu-api-tests/vmfle.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfle_vv_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfle_vv_f16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle_vf_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle_vf_f16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfle_vv_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfle_vv_f16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle_vf_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle_vf_f16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfle_vv_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfle_vv_f16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return 
__riscv_vmfle_vf_f16m1_b16(op1, op2, vl);
+vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m1_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m2_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m2_b8(vs2, rs1, vl);
 }

-vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m4_b4(op1, op2, vl);
+vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m4_b4(vs2, vs1, vl);
 }

-vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m4_b4(op1, op2, vl);
+vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m4_b4(vs2, rs1, vl);
 }

-vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m8_b2(op1, op2, vl);
+vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m8_b2(vs2, vs1, vl);
 }

-vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m8_b2(op1, op2, vl);
+vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m8_b2(vs2, rs1, vl);
 }

-vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32mf2_b64(op1, op2, vl);
+vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32mf2_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32mf2_b64(op1, op2, vl);
+vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32mf2_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m1_b32(op1, op2, vl);
+vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m1_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m1_b32(op1, op2, vl);
+vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m1_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m2_b16(op1, op2, vl);
+vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m2_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m2_b16(op1, op2, vl);
+vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m2_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m4_b8(op1, op2, vl);
+vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m4_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m4_b8(op1, op2, vl);
+vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m4_b8(vs2, rs1, vl);
 }

-vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m8_b4(op1, op2, vl);
+vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m8_b4(vs2, vs1, vl);
 }

-vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m8_b4(op1, op2, vl);
+vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m8_b4(vs2, rs1, vl);
 }

-vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m1_b64(op1, op2, vl);
+vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m1_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m1_b64(op1, op2, vl);
+vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m1_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m2_b32(op1, op2, vl);
+vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m2_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m2_b32(op1, op2, vl);
+vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m2_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m4_b16(op1, op2, vl);
+vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m4_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m4_b16(op1, op2, vl);
+vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m4_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m8_b8(op1, op2, vl);
+vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m8_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m8_b8(op1, op2, vl);
+vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m8_b8(vs2, rs1, vl);
 }

-vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16mf4_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16mf4_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16mf2_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16mf2_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m1_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m1_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m2_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m2_b8_m(vm, vs2, rs1, vl);
 }

-vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m4_b4_m(vm, vs2, vs1, vl);
 }

-vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m4_b4_m(vm, vs2, rs1, vl);
 }

-vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m8_b2_m(vm, vs2, vs1, vl);
 }

-vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m8_b2_m(vm, vs2, rs1, vl);
 }

-vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32mf2_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32mf2_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m1_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m1_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m2_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m2_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m4_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m4_b8_m(vm, vs2, rs1, vl);
 }

-vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m8_b4_m(vm, vs2, vs1, vl);
 }

-vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32m8_b4_m(vm, vs2, rs1, vl);
 }

-vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m1_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m1_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m2_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m2_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m4_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m4_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f64m8_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f64m8_b8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfle\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vmflt.c b/auto-generated/gnu-api-tests/vmflt.c
index d88515840..343ffe4f9 100644
--- a/auto-generated/gnu-api-tests/vmflt.c
+++ b/auto-generated/gnu-api-tests/vmflt.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16mf4_b64(op1, op2, vl);
+vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16mf4_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16mf4_b64(op1, op2, vl);
+vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16mf4_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16mf2_b32(op1, op2, vl);
+vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16mf2_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16mf2_b32(op1, op2, vl);
+vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16mf2_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m1_b16(op1, op2, vl);
+vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m1_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m1_b16(op1, op2, vl);
+vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m1_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m2_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m2_b8(vs2, rs1, vl);
 }

-vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m4_b4(op1, op2, vl);
+vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m4_b4(vs2, vs1, vl);
 }

-vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m4_b4(op1, op2, vl);
+vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m4_b4(vs2, rs1, vl);
 }

-vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m8_b2(op1, op2, vl);
+vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m8_b2(vs2, vs1, vl);
 }

-vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m8_b2(op1, op2, vl);
+vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m8_b2(vs2, rs1, vl);
 }

-vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32mf2_b64(op1, op2, vl);
+vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32mf2_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32mf2_b64(op1, op2, vl);
+vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32mf2_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m1_b32(op1, op2, vl);
+vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m1_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m1_b32(op1, op2, vl);
+vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m1_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m2_b16(op1, op2, vl);
+vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m2_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m2_b16(op1, op2, vl);
+vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m2_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m4_b8(op1, op2, vl);
+vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m4_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m4_b8(op1, op2, vl);
+vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m4_b8(vs2, rs1, vl);
 }

-vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m8_b4(op1, op2, vl);
+vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m8_b4(vs2, vs1, vl);
 }

-vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m8_b4(op1, op2, vl);
+vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m8_b4(vs2, rs1, vl);
 }

-vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m1_b64(op1, op2, vl);
+vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m1_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m1_b64(op1, op2, vl);
+vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m1_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m2_b32(op1, op2, vl);
+vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m2_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m2_b32(op1, op2, vl);
+vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m2_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m4_b16(op1, op2, vl);
+vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m4_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m4_b16(op1, op2, vl);
+vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m4_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m8_b8(op1, op2, vl);
+vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m8_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m8_b8(op1, op2, vl);
+vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m8_b8(vs2, rs1, vl);
 }

-vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16mf4_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16mf4_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16mf2_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16mf2_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m1_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m1_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m2_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m2_b8_m(vm, vs2, rs1, vl);
 }

-vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m4_b4_m(vm, vs2, vs1, vl);
 }

-vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m4_b4_m(vm, vs2, rs1, vl);
 }

-vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f16m8_b2_m(vm, vs2, vs1, vl);
 }

-vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f16m8_b2_m(vm, vs2, rs1, vl);
 }

-vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32mf2_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32mf2_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m1_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m1_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m2_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m2_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m4_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m4_b8_m(vm, vs2, rs1, vl);
 }

-vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f32m8_b4_m(vm, vs2, vs1, vl);
 }

-vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f32m8_b4_m(vm, vs2, rs1, vl);
 }

-vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m1_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m1_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m2_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m2_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m4_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m4_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmflt_vv_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmflt_vv_f64m8_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmflt_vf_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmflt_vf_f64m8_b8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmflt\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vmfne.c b/auto-generated/gnu-api-tests/vmfne.c
index 56143e4a0..5dd1937c0 100644
--- a/auto-generated/gnu-api-tests/vmfne.c
+++ b/auto-generated/gnu-api-tests/vmfne.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16mf4_b64(op1, op2, vl);
+vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16mf4_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16mf4_b64(op1, op2, vl);
+vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16mf4_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16mf2_b32(op1, op2, vl);
+vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16mf2_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16mf2_b32(op1, op2, vl);
+vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16mf2_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m1_b16(op1, op2, vl);
+vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m1_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m1_b16(op1, op2, vl);
+vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m1_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m2_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m2_b8(vs2, rs1, vl);
 }

-vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m4_b4(op1, op2, vl);
+vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m4_b4(vs2, vs1, vl);
 }

-vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m4_b4(op1, op2, vl);
+vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m4_b4(vs2, rs1, vl);
 }

-vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m8_b2(op1, op2, vl);
+vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m8_b2(vs2, vs1, vl);
 }

-vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m8_b2(op1, op2, vl);
+vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m8_b2(vs2, rs1, vl);
 }

-vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32mf2_b64(op1, op2, vl);
+vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32mf2_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32mf2_b64(op1, op2, vl);
+vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32mf2_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m1_b32(op1, op2, vl);
+vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m1_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m1_b32(op1, op2, vl);
+vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m1_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m2_b16(op1, op2, vl);
+vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m2_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m2_b16(op1, op2, vl);
+vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m2_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m4_b8(op1, op2, vl);
+vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m4_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m4_b8(op1, op2, vl);
+vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m4_b8(vs2, rs1, vl);
 }

-vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m8_b4(op1, op2, vl);
+vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m8_b4(vs2, vs1, vl);
 }

-vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m8_b4(op1, op2, vl);
+vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m8_b4(vs2, rs1, vl);
 }

-vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m1_b64(op1, op2, vl);
+vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m1_b64(vs2, vs1, vl);
 }

-vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m1_b64(op1, op2, vl);
+vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m1_b64(vs2, rs1, vl);
 }

-vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m2_b32(op1, op2, vl);
+vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m2_b32(vs2, vs1, vl);
 }

-vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m2_b32(op1, op2, vl);
+vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m2_b32(vs2, rs1, vl);
 }

-vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m4_b16(op1, op2, vl);
+vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m4_b16(vs2, vs1, vl);
 }

-vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m4_b16(op1, op2, vl);
+vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m4_b16(vs2, rs1, vl);
 }

-vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m8_b8(op1, op2, vl);
+vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m8_b8(vs2, vs1, vl);
 }

-vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m8_b8(op1, op2, vl);
+vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m8_b8(vs2, rs1, vl);
 }

-vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16mf4_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16mf4_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16mf2_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16mf2_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m1_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m1_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m2_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m2_b8_m(vm, vs2, rs1, vl);
 }

-vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m4_b4_m(vm, vs2, vs1, vl);
 }

-vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m4_b4_m(vm, vs2, rs1, vl);
 }

-vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f16m8_b2_m(vm, vs2, vs1, vl);
 }

-vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f16m8_b2_m(vm, vs2, rs1, vl);
 }

-vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32mf2_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32mf2_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m1_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m1_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m2_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m2_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m4_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m4_b8_m(vm, vs2, rs1, vl);
 }

-vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f32m8_b4_m(vm, vs2, vs1, vl);
 }

-vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f32m8_b4_m(vm, vs2, rs1, vl);
 }

-vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m1_b64_m(vm, vs2, vs1, vl);
 }

-vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m1_b64_m(vm, vs2, rs1, vl);
 }

-vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m2_b32_m(vm, vs2, vs1, vl);
 }

-vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m2_b32_m(vm, vs2, rs1, vl);
 }

-vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m4_b16_m(vm, vs2, vs1, vl);
 }

-vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m4_b16_m(vm, vs2, rs1, vl);
 }

-vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfne_vv_f64m8_b8_m(vm, vs2, vs1, vl);
 }

-vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfne_vf_f64m8_b8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfne\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vmin.c b/auto-generated/gnu-api-tests/vmin.c
index bf436203a..ccc0a66e2 100644
--- a/auto-generated/gnu-api-tests/vmin.c
+++ b/auto-generated/gnu-api-tests/vmin.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf8(op1, op2, vl);
+vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf8(vs2, vs1, vl);
 }

-vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf8(op1, op2, vl);
+vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf8(vs2, rs1, vl);
 }

-vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf4(op1, op2, vl);
+vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf4(vs2, vs1, vl);
 }

-vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf4(op1, op2, vl);
+vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf4(vs2, rs1, vl);
 }

-vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf2(op1, op2, vl);
+vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf2(vs2, vs1, vl);
 }

-vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf2(op1, op2, vl);
+vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf2(vs2, rs1, vl);
 }

-vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m1(op1, op2, vl);
+vint8m1_t test_vmin_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m1(vs2, vs1, vl);
 }

-vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m1(op1, op2, vl);
+vint8m1_t test_vmin_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m1(vs2, rs1, vl);
 }

-vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m2(op1, op2, vl);
+vint8m2_t test_vmin_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m2(vs2, vs1, vl);
 }

-vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m2(op1, op2, vl);
+vint8m2_t test_vmin_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m2(vs2, rs1, vl);
 }

-vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m4(op1, op2, vl);
+vint8m4_t test_vmin_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m4(vs2, vs1, vl);
 }

-vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m4(op1, op2, vl);
+vint8m4_t test_vmin_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m4(vs2, rs1, vl);
 }

-vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m8(op1, op2, vl);
+vint8m8_t test_vmin_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m8(vs2, vs1, vl);
 }

-vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m8(op1, op2, vl);
+vint8m8_t test_vmin_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m8(vs2, rs1, vl);
 }

-vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf4(op1, op2, vl);
+vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf4(vs2, vs1, vl);
 }

-vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf4(op1, op2, vl);
+vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf4(vs2, rs1, vl);
 }

-vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf2(op1, op2, vl);
+vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf2(vs2, vs1, vl);
 }

-vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf2(op1, op2, vl);
+vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf2(vs2, rs1, vl);
 }

-vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m1(op1, op2, vl);
+vint16m1_t test_vmin_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m1(vs2, vs1, vl);
 }

-vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m1(op1, op2, vl);
+vint16m1_t test_vmin_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m1(vs2, rs1, vl);
 }

-vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m2(op1, op2, vl);
+vint16m2_t test_vmin_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m2(vs2, vs1, vl);
 }

-vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m2(op1, op2, vl);
+vint16m2_t test_vmin_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m2(vs2, rs1, vl);
 }

-vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m4(op1, op2, vl);
+vint16m4_t test_vmin_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m4(vs2, vs1, vl);
 }

-vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m4(op1, op2, vl);
+vint16m4_t test_vmin_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m4(vs2, rs1, vl);
 }

-vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m8(op1, op2, vl);
+vint16m8_t test_vmin_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m8(vs2, vs1, vl);
 }

-vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m8(op1, op2, vl);
+vint16m8_t test_vmin_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m8(vs2, rs1, vl);
 }

-vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32mf2(op1, op2, vl);
+vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32mf2(vs2, vs1, vl);
 }

-vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32mf2(op1, op2, vl);
+vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32mf2(vs2, rs1, vl);
 }

-vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m1(op1, op2, vl);
+vint32m1_t test_vmin_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m1(vs2, vs1, vl);
 }

-vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m1(op1, op2, vl);
+vint32m1_t test_vmin_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m1(vs2, rs1, vl);
 }

-vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m2(op1, op2, vl);
+vint32m2_t test_vmin_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m2(vs2, vs1, vl);
 }

-vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m2(op1, op2, vl);
+vint32m2_t test_vmin_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m2(vs2, rs1, vl);
 }

-vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m4(op1, op2, vl);
+vint32m4_t test_vmin_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m4(vs2, vs1, vl);
 }

-vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m4(op1, op2, vl);
+vint32m4_t test_vmin_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m4(vs2, rs1, vl);
 }

-vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m8(op1, op2, vl);
+vint32m8_t test_vmin_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m8(vs2, vs1, vl);
 }

-vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m8(op1, op2, vl);
+vint32m8_t test_vmin_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m8(vs2, rs1, vl);
 }

-vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m1(op1, op2, vl);
+vint64m1_t test_vmin_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m1(vs2, vs1, vl);
 }

-vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m1(op1, op2, vl);
+vint64m1_t test_vmin_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m1(vs2, rs1, vl);
 }

-vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m2(op1, op2, vl);
+vint64m2_t test_vmin_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m2(vs2, vs1, vl);
 }

-vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m2(op1, op2, vl);
+vint64m2_t test_vmin_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m2(vs2, rs1, vl);
 }

-vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m4(op1, op2, vl);
+vint64m4_t test_vmin_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m4(vs2, vs1, vl);
 }

-vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m4(op1, op2, vl);
+vint64m4_t test_vmin_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m4(vs2, rs1, vl);
 }

-vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m8(op1, op2, vl);
+vint64m8_t test_vmin_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m8(vs2, vs1, vl);
 }

-vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m8(op1, op2, vl);
+vint64m8_t test_vmin_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m8(vs2, rs1, vl);
 }

-vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf8_m(vm, vs2, vs1, vl);
 }

-vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf8_m(vm, vs2, rs1, vl);
 }

-vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf4_m(vm, vs2, vs1, vl);
 }

-vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf4_m(vm, vs2, rs1, vl);
 }

-vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf2_m(vm, vs2, vs1, vl);
 }

-vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf2_m(vm, vs2, rs1, vl);
 }

-vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vmin_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vmin_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m1_m(vm, vs2, rs1, vl);
 }

-vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vmin_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m2_m(vm, vs2, vs1, vl);
 }

-vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vmin_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m2_m(vm, vs2, rs1, vl);
 }

-vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vmin_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m4_m(vm, vs2, vs1, vl);
 }

-vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vmin_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m4_m(vm, vs2, rs1, vl);
 }

-vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vmin_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m8_m(vm, vs2, vs1, vl);
 }

-vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vmin_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m8_m(vm, vs2, rs1, vl);
 }

-vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf4_m(vm, vs2, vs1, vl);
 }

-vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf4_m(vm, vs2, rs1, vl);
 }

-vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf2_m(vm, vs2, vs1, vl);
 }

-vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf2_m(vm, vs2, rs1, vl);
 }

-vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vmin_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vmin_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m1_m(vm, vs2, rs1, vl);
 }

-vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vmin_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m2_m(vm, vs2, vs1, vl);
 }

-vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vmin_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m2_m(vm, vs2, rs1, vl);
 }

-vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vmin_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m4_m(vm, vs2, vs1, vl);
 }

-vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vmin_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m4_m(vm, vs2, rs1, vl);
 }

-vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vmin_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m8_m(vm, vs2, vs1, vl);
 }

-vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vmin_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m8_m(vm, vs2, rs1, vl);
 }

-vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32mf2_m(vm, vs2, vs1, vl);
 }

-vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32mf2_m(vm, vs2, rs1, vl);
 }

-vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vmin_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vmin_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m1_m(vm, vs2, rs1, vl);
 }

-vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vmin_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m2_m(vm, vs2, vs1, vl);
 }

-vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vmin_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m2_m(vm, vs2, rs1, vl);
 }

-vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vmin_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m4_m(vm, vs2, vs1, vl);
 }

-vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vmin_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m4_m(vm, vs2, rs1, vl);
 }

-vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmin_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m8_m(vm, vs2, vs1, vl);
 }

-vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmin_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m8_m(vm, vs2, rs1, vl);
 }

-vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmin_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m1_m(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmin_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m1_m(vm, vs2, rs1, vl);
 }

-vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vmin_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m2_m(vm, vs2, vs1, vl);
 }

-vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t
op2, size_t vl) { - return __riscv_vmin_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmin_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmin_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmin_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmin_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_vx_i64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmin\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vminu.c b/auto-generated/gnu-api-tests/vminu.c index 0a0c19f3a..2559d2f47 100644 --- a/auto-generated/gnu-api-tests/vminu.c +++ b/auto-generated/gnu-api-tests/vminu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf8(op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf8(op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf4(op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf4(op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf2(op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf2(op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu_vv_u8m1(op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t 
op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m1(op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu_vv_u8m2(op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m2(op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu_vv_u8m4(op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m4(op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_vv_u8m8(op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vminu_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - 
return __riscv_vminu_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf2_m(mask, op1, op2, vl); 
+vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t 
test_vminu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, 
size_t vl) { - return __riscv_vminu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t 
test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vminu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vmmv.c b/auto-generated/gnu-api-tests/vmmv.c index 3e0b3f08d..aff8ce818 100644 --- a/auto-generated/gnu-api-tests/vmmv.c +++ b/auto-generated/gnu-api-tests/vmmv.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmmv_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmmv_m_b1(op1, vl); +vbool1_t test_vmmv_m_b1(vbool1_t vs, size_t vl) { + return __riscv_vmmv_m_b1(vs, vl); } -vbool2_t test_vmmv_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmmv_m_b2(op1, vl); +vbool2_t test_vmmv_m_b2(vbool2_t vs, size_t vl) { + return __riscv_vmmv_m_b2(vs, vl); } -vbool4_t test_vmmv_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vmmv_m_b4(op1, vl); +vbool4_t test_vmmv_m_b4(vbool4_t vs, size_t vl) { + return __riscv_vmmv_m_b4(vs, vl); } -vbool8_t test_vmmv_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmmv_m_b8(op1, vl); +vbool8_t test_vmmv_m_b8(vbool8_t vs, size_t vl) { + return __riscv_vmmv_m_b8(vs, vl); } -vbool16_t test_vmmv_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmmv_m_b16(op1, vl); +vbool16_t test_vmmv_m_b16(vbool16_t vs, size_t vl) { + return __riscv_vmmv_m_b16(vs, vl); } -vbool32_t test_vmmv_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmmv_m_b32(op1, vl); +vbool32_t test_vmmv_m_b32(vbool32_t vs, size_t vl) { + return __riscv_vmmv_m_b32(vs, vl); } -vbool64_t test_vmmv_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmmv_m_b64(op1, vl); +vbool64_t test_vmmv_m_b64(vbool64_t vs, size_t vl) { + return __riscv_vmmv_m_b64(vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmmv\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vmnand.c b/auto-generated/gnu-api-tests/vmnand.c index 2412565ea..4a5a275fc 100644 --- a/auto-generated/gnu-api-tests/vmnand.c +++ b/auto-generated/gnu-api-tests/vmnand.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmnand_mm_b1(op1, op2, vl); +vbool1_t test_vmnand_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmnand_mm_b1(vs2, vs1, vl); } -vbool2_t 
test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmnand_mm_b2(op1, op2, vl); +vbool2_t test_vmnand_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmnand_mm_b2(vs2, vs1, vl); } -vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmnand_mm_b4(op1, op2, vl); +vbool4_t test_vmnand_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmnand_mm_b4(vs2, vs1, vl); } -vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmnand_mm_b8(op1, op2, vl); +vbool8_t test_vmnand_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmnand_mm_b8(vs2, vs1, vl); } -vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmnand_mm_b16(op1, op2, vl); +vbool16_t test_vmnand_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmnand_mm_b16(vs2, vs1, vl); } -vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmnand_mm_b32(op1, op2, vl); +vbool32_t test_vmnand_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmnand_mm_b32(vs2, vs1, vl); } -vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmnand_mm_b64(op1, op2, vl); +vbool64_t test_vmnand_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmnand_mm_b64(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmnand\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vmnor.c b/auto-generated/gnu-api-tests/vmnor.c index 797742d92..bf7ebca2e 100644 --- a/auto-generated/gnu-api-tests/vmnor.c +++ b/auto-generated/gnu-api-tests/vmnor.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmnor_mm_b1(op1, op2, vl); +vbool1_t test_vmnor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmnor_mm_b1(vs2, vs1, vl); } -vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmnor_mm_b2(op1, op2, vl); +vbool2_t test_vmnor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmnor_mm_b2(vs2, vs1, vl); } -vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmnor_mm_b4(op1, op2, vl); +vbool4_t test_vmnor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmnor_mm_b4(vs2, vs1, vl); } -vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmnor_mm_b8(op1, op2, vl); +vbool8_t test_vmnor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmnor_mm_b8(vs2, vs1, vl); } -vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmnor_mm_b16(op1, op2, vl); +vbool16_t test_vmnor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmnor_mm_b16(vs2, vs1, vl); } -vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmnor_mm_b32(op1, op2, vl); +vbool32_t test_vmnor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmnor_mm_b32(vs2, vs1, vl); } -vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmnor_mm_b64(op1, op2, vl); +vbool64_t test_vmnor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmnor_mm_b64(vs2, vs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmnor\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vmnot.c b/auto-generated/gnu-api-tests/vmnot.c index 5e8a4fc77..43f7e6506 100644 --- a/auto-generated/gnu-api-tests/vmnot.c +++ b/auto-generated/gnu-api-tests/vmnot.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmnot_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmnot_m_b1(op1, vl); +vbool1_t test_vmnot_m_b1(vbool1_t vs, size_t vl) { + return __riscv_vmnot_m_b1(vs, vl); } -vbool2_t test_vmnot_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmnot_m_b2(op1, vl); +vbool2_t test_vmnot_m_b2(vbool2_t vs, size_t vl) { + return __riscv_vmnot_m_b2(vs, vl); } -vbool4_t test_vmnot_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vmnot_m_b4(op1, vl); +vbool4_t test_vmnot_m_b4(vbool4_t vs, size_t vl) { + return __riscv_vmnot_m_b4(vs, vl); } -vbool8_t test_vmnot_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmnot_m_b8(op1, vl); +vbool8_t test_vmnot_m_b8(vbool8_t vs, size_t vl) { + return __riscv_vmnot_m_b8(vs, vl); } -vbool16_t test_vmnot_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmnot_m_b16(op1, vl); +vbool16_t test_vmnot_m_b16(vbool16_t vs, size_t vl) { + return __riscv_vmnot_m_b16(vs, vl); } -vbool32_t test_vmnot_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmnot_m_b32(op1, vl); +vbool32_t test_vmnot_m_b32(vbool32_t vs, size_t vl) { + return __riscv_vmnot_m_b32(vs, vl); } -vbool64_t test_vmnot_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmnot_m_b64(op1, vl); +vbool64_t test_vmnot_m_b64(vbool64_t vs, size_t vl) { + return __riscv_vmnot_m_b64(vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmnot\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vmor.c b/auto-generated/gnu-api-tests/vmor.c index ebab5aeea..29ee8d892 100644 --- a/auto-generated/gnu-api-tests/vmor.c +++ b/auto-generated/gnu-api-tests/vmor.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmor_mm_b1(op1, op2, vl); +vbool1_t test_vmor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmor_mm_b1(vs2, vs1, vl); } -vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmor_mm_b2(op1, op2, vl); +vbool2_t test_vmor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmor_mm_b2(vs2, vs1, vl); } -vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmor_mm_b4(op1, op2, vl); +vbool4_t test_vmor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmor_mm_b4(vs2, vs1, vl); } -vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmor_mm_b8(op1, op2, vl); +vbool8_t test_vmor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmor_mm_b8(vs2, vs1, vl); } -vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmor_mm_b16(op1, op2, vl); +vbool16_t test_vmor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmor_mm_b16(vs2, vs1, vl); } -vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmor_mm_b32(op1, op2, vl); +vbool32_t test_vmor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmor_mm_b32(vs2, vs1, vl); } -vbool64_t 
test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmor_mm_b64(op1, op2, vl); +vbool64_t test_vmor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmor_mm_b64(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmor\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vmorn.c b/auto-generated/gnu-api-tests/vmorn.c index ecc581b94..a3d3430c6 100644 --- a/auto-generated/gnu-api-tests/vmorn.c +++ b/auto-generated/gnu-api-tests/vmorn.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmorn_mm_b1(op1, op2, vl); +vbool1_t test_vmorn_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmorn_mm_b1(vs2, vs1, vl); } -vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmorn_mm_b2(op1, op2, vl); +vbool2_t test_vmorn_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmorn_mm_b2(vs2, vs1, vl); } -vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmorn_mm_b4(op1, op2, vl); +vbool4_t test_vmorn_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmorn_mm_b4(vs2, vs1, vl); } -vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmorn_mm_b8(op1, op2, vl); +vbool8_t test_vmorn_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmorn_mm_b8(vs2, vs1, vl); } -vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmorn_mm_b16(op1, op2, vl); +vbool16_t test_vmorn_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmorn_mm_b16(vs2, vs1, vl); } -vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmorn_mm_b32(op1, op2, vl); +vbool32_t test_vmorn_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmorn_mm_b32(vs2, vs1, vl); } -vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmorn_mm_b64(op1, op2, vl); +vbool64_t test_vmorn_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmorn_mm_b64(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmorn\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vmsbc.c b/auto-generated/gnu-api-tests/vmsbc.c index 5aa7820dd..82987f607 100644 --- a/auto-generated/gnu-api-tests/vmsbc.c +++ b/auto-generated/gnu-api-tests/vmsbc.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc_vvm_i8mf8_b64(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc_vvm_i8mf8_b64(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc_vxm_i8mf8_b64(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc_vxm_i8mf8_b64(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsbc_vv_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t vs2, 
vint8mf8_t vs1, size_t vl) { + return __riscv_vmsbc_vv_i8mf8_b64(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc_vx_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc_vx_i8mf8_b64(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc_vvm_i8mf4_b32(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc_vvm_i8mf4_b32(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc_vxm_i8mf4_b32(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc_vxm_i8mf4_b32(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsbc_vv_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsbc_vv_i8mf4_b32(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc_vx_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc_vx_i8mf4_b32(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc_vvm_i8mf2_b16(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc_vvm_i8mf2_b16(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc_vxm_i8mf2_b16(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc_vxm_i8mf2_b16(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsbc_vv_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsbc_vv_i8mf2_b16(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc_vx_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc_vx_i8mf2_b16(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc_vvm_i8m1_b8(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc_vvm_i8m1_b8(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc_vxm_i8m1_b8(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc_vxm_i8m1_b8(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsbc_vv_i8m1_b8(op1, op2, vl); +vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsbc_vv_i8m1_b8(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return 
__riscv_vmsbc_vx_i8m1_b8(op1, op2, vl); +vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc_vx_i8m1_b8(vs2, rs1, vl); } -vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc_vvm_i8m2_b4(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc_vvm_i8m2_b4(vs2, vs1, v0, vl); } -vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc_vxm_i8m2_b4(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc_vxm_i8m2_b4(vs2, rs1, v0, vl); } -vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsbc_vv_i8m2_b4(op1, op2, vl); +vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsbc_vv_i8m2_b4(vs2, vs1, vl); } -vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc_vx_i8m2_b4(op1, op2, vl); +vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc_vx_i8m2_b4(vs2, rs1, vl); } -vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc_vvm_i8m4_b2(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc_vvm_i8m4_b2(vs2, vs1, v0, vl); } -vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc_vxm_i8m4_b2(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc_vxm_i8m4_b2(vs2, rs1, v0, vl); } -vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsbc_vv_i8m4_b2(op1, op2, vl); +vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsbc_vv_i8m4_b2(vs2, vs1, vl); } -vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc_vx_i8m4_b2(op1, op2, vl); +vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc_vx_i8m4_b2(vs2, rs1, vl); } -vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vmsbc_vvm_i8m8_b1(op1, op2, borrowin, vl); +vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmsbc_vvm_i8m8_b1(vs2, vs1, v0, vl); } -vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vmsbc_vxm_i8m8_b1(op1, op2, borrowin, vl); +vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmsbc_vxm_i8m8_b1(vs2, rs1, v0, vl); } -vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsbc_vv_i8m8_b1(op1, op2, vl); +vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsbc_vv_i8m8_b1(vs2, vs1, vl); } -vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc_vx_i8m8_b1(op1, op2, vl); +vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc_vx_i8m8_b1(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return 
__riscv_vmsbc_vvm_i16mf4_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i16mf4_b64(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i16mf4_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i16mf4_b64(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i16mf2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i16mf2_b32(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i16mf2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i16mf2_b32(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i16m1_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i16m1_b16(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i16m1_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i16m1_b16(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i16m2_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i16m2_b8(vs2, vs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i16m2_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i16m2_b8(vs2, rs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i16m4_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i16m4_b4(vs2, vs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i16m4_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i16m4_b4(vs2, rs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i16m8_b2(op1, op2, borrowin, vl);
+vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i16m8_b2(vs2, vs1, v0, vl);
 }
 
-vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i16m8_b2(op1, op2, borrowin, vl);
+vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i16m8_b2(vs2, rs1, v0, vl);
 }
 
-vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i32mf2_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i32mf2_b64(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i32mf2_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i32mf2_b64(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i32m1_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i32m1_b32(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i32m1_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i32m1_b32(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i32m2_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i32m2_b16(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i32m2_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i32m2_b16(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i32m4_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i32m4_b8(vs2, vs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i32m4_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i32m4_b8(vs2, rs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i32m8_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i32m8_b4(vs2, vs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i32m8_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i32m8_b4(vs2, rs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i64m1_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i64m1_b64(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i64m1_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i64m1_b64(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i64m2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i64m2_b32(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i64m2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i64m2_b32(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i64m4_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i64m4_b16(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i64m4_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i64m4_b16(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_i64m8_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_i64m8_b8(vs2, vs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_i64m8_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_i64m8_b8(vs2, rs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_i64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_i64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u8mf8_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u8mf8_b64(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u8mf8_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u8mf8_b64(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u8mf4_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u8mf4_b32(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u8mf4_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u8mf4_b32(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u8mf2_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u8mf2_b16(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u8mf2_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u8mf2_b16(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u8m1_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u8m1_b8(vs2, vs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u8m1_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u8m1_b8(vs2, rs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u8m2_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u8m2_b4(vs2, vs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u8m2_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u8m2_b4(vs2, rs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u8m4_b2(op1, op2, borrowin, vl);
+vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u8m4_b2(vs2, vs1, v0, vl);
 }
 
-vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u8m4_b2(op1, op2, borrowin, vl);
+vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u8m4_b2(vs2, rs1, v0, vl);
 }
 
-vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u8m8_b1(op1, op2, borrowin, vl);
+vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u8m8_b1(vs2, vs1, v0, vl);
 }
 
-vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u8m8_b1(op1, op2, borrowin, vl);
+vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u8m8_b1(vs2, rs1, v0, vl);
 }
 
-vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u16mf4_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u16mf4_b64(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u16mf4_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u16mf4_b64(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u16mf2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u16mf2_b32(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u16mf2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u16mf2_b32(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u16m1_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u16m1_b16(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u16m1_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u16m1_b16(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u16m2_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u16m2_b8(vs2, vs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u16m2_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u16m2_b8(vs2, rs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u16m4_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u16m4_b4(vs2, vs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u16m4_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u16m4_b4(vs2, rs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u16m8_b2(op1, op2, borrowin, vl);
+vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u16m8_b2(vs2, vs1, v0, vl);
 }
 
-vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u16m8_b2(op1, op2, borrowin, vl);
+vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u16m8_b2(vs2, rs1, v0, vl);
 }
 
-vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u32mf2_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u32mf2_b64(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u32mf2_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u32mf2_b64(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u32m1_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u32m1_b32(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u32m1_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u32m1_b32(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u32m2_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u32m2_b16(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u32m2_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u32m2_b16(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u32m4_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u32m4_b8(vs2, vs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u32m4_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u32m4_b8(vs2, rs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u32m8_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u32m8_b4(vs2, vs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u32m8_b4(op1, op2, borrowin, vl);
+vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u32m8_b4(vs2, rs1, v0, vl);
 }
 
-vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u64m1_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u64m1_b64(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u64m1_b64(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u64m1_b64(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u64m2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u64m2_b32(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u64m2_b32(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u64m2_b32(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u64m4_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u64m4_b16(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u64m4_b16(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u64m4_b16(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vvm_u64m8_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vvm_u64m8_b8(vs2, vs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vmsbc_vxm_u64m8_b8(op1, op2, borrowin, vl);
+vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmsbc_vxm_u64m8_b8(vs2, rs1, v0, vl);
 }
 
-vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsbc_vv_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsbc_vv_u64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsbc_vx_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsbc_vx_u64m8_b8(vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vmsbc\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vmsbf.c b/auto-generated/gnu-api-tests/vmsbf.c
index 03693929f..0f3a66c43 100644
--- a/auto-generated/gnu-api-tests/vmsbf.c
+++ b/auto-generated/gnu-api-tests/vmsbf.c
@@ -6,60 +6,60 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b1(op1, vl);
+vbool1_t test_vmsbf_m_b1(vbool1_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b1(vs2, vl);
 }
 
-vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b2(op1, vl);
+vbool2_t test_vmsbf_m_b2(vbool2_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b2(vs2, vl);
 }
 
-vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b4(op1, vl);
+vbool4_t test_vmsbf_m_b4(vbool4_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b4(vs2, vl);
 }
 
-vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b8(op1, vl);
+vbool8_t test_vmsbf_m_b8(vbool8_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b8(vs2, vl);
 }
 
-vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b16(op1, vl);
+vbool16_t test_vmsbf_m_b16(vbool16_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b16(vs2, vl);
 }
 
-vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b32(op1, vl);
+vbool32_t test_vmsbf_m_b32(vbool32_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b32(vs2, vl);
 }
 
-vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b64(op1, vl);
+vbool64_t test_vmsbf_m_b64(vbool64_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b64(vs2, vl);
 }
 
-vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b1_m(mask, op1, vl);
+vbool1_t test_vmsbf_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b1_m(vm, vs2, vl);
 }
 
-vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b2_m(mask, op1, vl);
+vbool2_t test_vmsbf_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b2_m(vm, vs2, vl);
 }
 
-vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b4_m(mask, op1, vl);
+vbool4_t test_vmsbf_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b4_m(vm, vs2, vl);
 }
 
-vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b8_m(mask, op1, vl);
+vbool8_t test_vmsbf_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b8_m(vm, vs2, vl);
 }
 
-vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b16_m(mask, op1, vl);
+vbool16_t test_vmsbf_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b16_m(vm, vs2, vl);
 }
 
-vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b32_m(mask, op1, vl);
+vbool32_t test_vmsbf_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b32_m(vm, vs2, vl);
 }
 
-vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
-  return __riscv_vmsbf_m_b64_m(mask, op1, vl);
+vbool64_t test_vmsbf_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) {
+  return __riscv_vmsbf_m_b64_m(vm, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsbf\.[ivxfswum.]+\s+} 14 } } */
diff --git a/auto-generated/gnu-api-tests/vmseq.c b/auto-generated/gnu-api-tests/vmseq.c
index 162a0df98..d939da0a4 100644
--- a/auto-generated/gnu-api-tests/vmseq.c
+++ b/auto-generated/gnu-api-tests/vmseq.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_u64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmseq_vx_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_u64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8mf8_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8mf8_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8mf4_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8mf4_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8mf2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8mf2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m1_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m1_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m2_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m2_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m4_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m4_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i8m8_b1_m(vm, vs2, vs1, vl);
 }
 
-vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i8m8_b1_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16mf4_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16mf4_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmseq_vv_i16mf2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmseq_vx_i16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmseq_vx_i16mf2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmseq_vv_i16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t 
test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t 
test_vmseq_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m8_b8_m(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmseq_vv_u8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8mf8_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8mf8_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmseq_vv_u8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8mf4_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8mf4_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_u8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8mf2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8mf2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8m1_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m1_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8m2_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t 
mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m2_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8m4_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m4_b2_m(vm, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8m8_b1_m(vm, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m8_b1_m(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmseq_vv_u16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_u16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t vm, 
vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_u32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t 
test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m8_b8_m(mask, op1, op2, vl); 
+vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m8_b8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmseq\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-api-tests/vmsge.c b/auto-generated/gnu-api-tests/vmsge.c index ea52ca960..f0992d046 100644 --- a/auto-generated/gnu-api-tests/vmsge.c +++ b/auto-generated/gnu-api-tests/vmsge.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf8_b64(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf8_b64(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf4_b32(vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf4_b32(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf2_b16(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf2_b16(vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m1_b8(op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m1_b8(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m1_b8(op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m1_b8(vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m2_b4(op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m2_b4(vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m2_b4(op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m2_b4(vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m4_b2(op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m4_b2(vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m4_b2(op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vmsge_vx_i8m4_b2(vs2, rs1, vl); } -vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m8_b1(op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m8_b1(vs2, vs1, vl); } -vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m8_b1(op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m8_b1(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsge_vv_i16mf4_b64(op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16mf4_b64(op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i16mf2_b32(op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16mf2_b32(op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m1_b16(op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m1_b16(op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m2_b8(op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m2_b8(op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m4_b4(op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m4_b4(op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m8_b2(op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m8_b2(op1, op2, vl); +vbool2_t 
test_vmsge_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m1_b32(op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m1_b32(op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m2_b16(op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m2_b16(op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m4_b8(op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m4_b8(op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m8_b4(op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m8_b4(op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m1_b64(op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m1_b64(op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m2_b32(op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t op1, 
int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m2_b32(op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m4_b16(op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m4_b16(op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m8_b8(op1, op2, vl); +vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m8_b8(op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf8_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf8_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf4_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf4_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m1_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t 
test_vmsge_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m1_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m2_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m2_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m4_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m4_b2_m(vm, vs2, rs1, vl); } -vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m8_b1_m(vm, vs2, vs1, vl); } -vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m8_b1_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsge_vv_i16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, 
size_t vl) { - return __riscv_vmsge_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return 
__riscv_vmsge_vv_i32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m8_b8_m(mask, 
op1, op2, vl);
+vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmsge_vv_i64m8_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsge_vx_i64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsge_vx_i64m8_b8_m(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmsgeu.c b/auto-generated/gnu-api-tests/vmsgeu.c
index e99197793..9c9409a9f 100644
--- a/auto-generated/gnu-api-tests/vmsgeu.c
+++ b/auto-generated/gnu-api-tests/vmsgeu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8mf8_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8mf8_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8mf4_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8mf4_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8mf2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8mf2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m1_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m1_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m2_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m2_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m4_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m4_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u8m8_b1_m(vm, vs2, vs1, vl);
 }
 
-vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u8m8_b1_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16mf4_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16mf4_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16mf2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16mf2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m1_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m1_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m2_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m2_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m4_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m4_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u16m8_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u16m8_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32mf2_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32mf2_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m1_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m1_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m4_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m4_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u32m8_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u32m8_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m1_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m1_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m4_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m4_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsgeu_vv_u64m8_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgeu_vx_u64m8_b8_m(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmsgt.c b/auto-generated/gnu-api-tests/vmsgt.c
index 1b86c23e6..c7e0cb3bb 100644
--- a/auto-generated/gnu-api-tests/vmsgt.c
+++ b/auto-generated/gnu-api-tests/vmsgt.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8mf8_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8mf8_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8mf4_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8mf4_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8mf2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8mf2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m1_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m1_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m2_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m2_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m4_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m4_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i8m8_b1_m(vm, vs2, vs1, vl);
 }
 
-vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i8m8_b1_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16mf4_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16mf4_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16mf2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16mf2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m1_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m1_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m2_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m2_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m4_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m4_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i16m8_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i16m8_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32mf2_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32mf2_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m1_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m1_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m4_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m4_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i32m8_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i32m8_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m1_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m1_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m4_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m4_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmsgt_vv_i64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmsgt_vv_i64m8_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsgt_vx_i64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsgt_vx_i64m8_b8_m(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vmsgt\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmsgtu.c b/auto-generated/gnu-api-tests/vmsgtu.c
index eaf751901..b33ae1bd4 100644
--- a/auto-generated/gnu-api-tests/vmsgtu.c
+++ b/auto-generated/gnu-api-tests/vmsgtu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8mf8_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8mf8_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8mf4_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8mf4_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u8mf2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8mf2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m1_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m1_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m2_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u8m2_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m4_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m4_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m8_b1_m(vm, vs2, vs1, vl);
 }
 
-vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m8_b1_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16mf4_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16mf4_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u16mf2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16mf2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u16m1_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u16m1_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u16m2_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u16m2_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u16m4_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u16m4_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u16m8_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u16m8_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u32mf2_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u32mf2_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u32m1_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u32m1_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u32m2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u32m2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u32m4_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vmsgtu_vx_u32m4_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vmsgtu_vv_u32m8_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t
vl) { - return __riscv_vmsgtu_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu_vx_u32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgtu_vv_u64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_vx_u64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgtu_vv_u64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_vx_u64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgtu_vv_u64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_vx_u64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgtu_vv_u64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_vx_u64m8_b8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsgtu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vmsif.c b/auto-generated/gnu-api-tests/vmsif.c index 2d98c4eb4..33efcbc31 100644 --- a/auto-generated/gnu-api-tests/vmsif.c +++ b/auto-generated/gnu-api-tests/vmsif.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmsif_m_b1(op1, vl); +vbool1_t test_vmsif_m_b1(vbool1_t vs2, size_t vl) { + return __riscv_vmsif_m_b1(vs2, vl); } -vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmsif_m_b2(op1, vl); +vbool2_t test_vmsif_m_b2(vbool2_t vs2, size_t vl) { + return __riscv_vmsif_m_b2(vs2, vl); } -vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { - return 
__riscv_vmsif_m_b4(op1, vl); +vbool4_t test_vmsif_m_b4(vbool4_t vs2, size_t vl) { + return __riscv_vmsif_m_b4(vs2, vl); } -vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmsif_m_b8(op1, vl); +vbool8_t test_vmsif_m_b8(vbool8_t vs2, size_t vl) { + return __riscv_vmsif_m_b8(vs2, vl); } -vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmsif_m_b16(op1, vl); +vbool16_t test_vmsif_m_b16(vbool16_t vs2, size_t vl) { + return __riscv_vmsif_m_b16(vs2, vl); } -vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmsif_m_b32(op1, vl); +vbool32_t test_vmsif_m_b32(vbool32_t vs2, size_t vl) { + return __riscv_vmsif_m_b32(vs2, vl); } -vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmsif_m_b64(op1, vl); +vbool64_t test_vmsif_m_b64(vbool64_t vs2, size_t vl) { + return __riscv_vmsif_m_b64(vs2, vl); } -vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vmsif_m_b1_m(mask, op1, vl); +vbool1_t test_vmsif_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { + return __riscv_vmsif_m_b1_m(vm, vs2, vl); } -vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vmsif_m_b2_m(mask, op1, vl); +vbool2_t test_vmsif_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { + return __riscv_vmsif_m_b2_m(vm, vs2, vl); } -vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vmsif_m_b4_m(mask, op1, vl); +vbool4_t test_vmsif_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_vmsif_m_b4_m(vm, vs2, vl); } -vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_vmsif_m_b8_m(mask, op1, vl); +vbool8_t test_vmsif_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_vmsif_m_b8_m(vm, vs2, vl); } -vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_vmsif_m_b16_m(mask, op1, vl); +vbool16_t test_vmsif_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_vmsif_m_b16_m(vm, vs2, vl); } -vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_vmsif_m_b32_m(mask, op1, vl); +vbool32_t test_vmsif_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_vmsif_m_b32_m(vm, vs2, vl); } -vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_vmsif_m_b64_m(mask, op1, vl); +vbool64_t test_vmsif_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_vmsif_m_b64_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsif\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-api-tests/vmsle.c b/auto-generated/gnu-api-tests/vmsle.c index c00f938cb..85ff1a31e 100644 --- a/auto-generated/gnu-api-tests/vmsle.c +++ b/auto-generated/gnu-api-tests/vmsle.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsle_vv_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8mf8_b64(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8mf8_b64(op1, op2, vl); +vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8mf8_b64(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, 
vint8mf4_t op2, size_t vl) { - return __riscv_vmsle_vv_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8mf4_b32(vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8mf4_b32(op1, op2, vl); +vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8mf4_b32(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsle_vv_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8mf2_b16(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8mf2_b16(op1, op2, vl); +vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8mf2_b16(vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m1_b8(op1, op2, vl); +vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m1_b8(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8m1_b8(op1, op2, vl); +vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m1_b8(vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m2_b4(op1, op2, vl); +vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m2_b4(vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8m2_b4(op1, op2, vl); +vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m2_b4(vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m4_b2(op1, op2, vl); +vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m4_b2(vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8m4_b2(op1, op2, vl); +vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m4_b2(vs2, rs1, vl); } -vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m8_b1(op1, op2, vl); +vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m8_b1(vs2, vs1, vl); } -vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8m8_b1(op1, op2, vl); +vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m8_b1(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsle_vv_i16mf4_b64(op1, op2, vl); +vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16mf4_b64(op1, op2, vl); +vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, 
vint16mf2_t op2, size_t vl) { - return __riscv_vmsle_vv_i16mf2_b32(op1, op2, vl); +vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16mf2_b32(op1, op2, vl); +vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m1_b16(op1, op2, vl); +vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m1_b16(op1, op2, vl); +vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m2_b8(op1, op2, vl); +vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m2_b8(op1, op2, vl); +vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m4_b4(op1, op2, vl); +vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m4_b4(op1, op2, vl); +vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m8_b2(op1, op2, vl); +vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m8_b2(op1, op2, vl); +vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsle_vv_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m1_b32(op1, op2, vl); +vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m1_b32(op1, op2, vl); +vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vmsle_vx_i32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m2_b16(op1, op2, vl); +vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m2_b16(op1, op2, vl); +vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m4_b8(op1, op2, vl); +vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m4_b8(op1, op2, vl); +vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m8_b4(op1, op2, vl); +vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m8_b4(op1, op2, vl); +vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m1_b64(op1, op2, vl); +vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m1_b64(op1, op2, vl); +vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m2_b32(op1, op2, vl); +vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m2_b32(op1, op2, vl); +vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m4_b16(op1, op2, vl); +vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m4_b16(op1, op2, vl); +vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m8_b8(op1, op2, vl); +vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m8_b8(op1, op2, vl); +vbool8_t 
test_vmsle_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsle_vv_i8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8mf8_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8mf8_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsle_vv_i8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8mf4_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8mf4_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsle_vv_i8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8mf2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8mf2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m1_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m1_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m2_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m2_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m4_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return 
__riscv_vmsle_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m4_b2_m(vm, vs2, rs1, vl); } -vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i8m8_b1_m(vm, vs2, vs1, vl); } -vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle_vx_i8m8_b1_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsle_vv_i16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsle_vv_i16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m4_b4_m(vm, vs2, 
vs1, vl); } -vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle_vx_i16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsle_vv_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t 
test_vmsle_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle_vx_i32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsle_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsle_vv_i64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle_vx_i64m8_b8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsle\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vmsleu.c b/auto-generated/gnu-api-tests/vmsleu.c index 95863312f..3bd2a78e6 100644 --- a/auto-generated/gnu-api-tests/vmsleu.c +++ b/auto-generated/gnu-api-tests/vmsleu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8mf8_b64(op1, op2, vl); +vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8mf8_b64(vs2, vs1, vl); } 
-vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8mf8_b64(op1, op2, vl); +vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8mf8_b64(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8mf4_b32(op1, op2, vl); +vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8mf4_b32(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8mf4_b32(op1, op2, vl); +vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8mf4_b32(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8mf2_b16(op1, op2, vl); +vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8mf2_b16(vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8mf2_b16(op1, op2, vl); +vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8mf2_b16(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m1_b8(op1, op2, vl); +vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8m1_b8(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m1_b8(op1, op2, vl); +vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m1_b8(vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m2_b4(op1, op2, vl); +vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8m2_b4(vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m2_b4(op1, op2, vl); +vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m2_b4(vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m4_b2(op1, op2, vl); +vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8m4_b2(vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m4_b2(op1, op2, vl); +vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m4_b2(vs2, rs1, vl); } -vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m8_b1(op1, op2, vl); +vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8m8_b1(vs2, vs1, vl); } -vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m8_b1(op1, op2, vl); +vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m8_b1(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16mf4_b64(op1, op2, vl); +vbool64_t 
test_vmsleu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16mf4_b64(op1, op2, vl); +vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16mf2_b32(op1, op2, vl); +vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16mf2_b32(op1, op2, vl); +vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m1_b16(op1, op2, vl); +vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m1_b16(op1, op2, vl); +vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m2_b8(op1, op2, vl); +vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m2_b8(op1, op2, vl); +vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m4_b4(op1, op2, vl); +vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m4_b4(op1, op2, vl); +vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m8_b2(op1, op2, vl); +vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m8_b2(op1, op2, vl); +vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32mf2_b64(op1, op2, vl); +vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32mf2_b64(op1, op2, vl); +vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, 
size_t vl) { + return __riscv_vmsleu_vx_u32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32m1_b32(op1, op2, vl); +vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32m1_b32(op1, op2, vl); +vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32m2_b16(op1, op2, vl); +vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32m2_b16(op1, op2, vl); +vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32m4_b8(op1, op2, vl); +vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32m4_b8(op1, op2, vl); +vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32m8_b4(op1, op2, vl); +vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32m8_b4(op1, op2, vl); +vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsleu_vv_u64m1_b64(op1, op2, vl); +vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu_vx_u64m1_b64(op1, op2, vl); +vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u64m2_b32(op1, op2, vl); +vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu_vx_u64m2_b32(op1, op2, vl); +vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u64m4_b16(op1, op2, vl); +vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u64m4_b16(vs2, vs1, vl); } -vbool16_t 
test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu_vx_u64m4_b16(op1, op2, vl); +vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u64m8_b8(op1, op2, vl); +vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu_vx_u64m8_b8(op1, op2, vl); +vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8mf8_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8mf8_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8mf4_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8mf4_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8mf2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8mf2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8m1_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m1_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return 
__riscv_vmsleu_vv_u8m2_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m2_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8m4_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m4_b2_m(vm, vs2, rs1, vl); } -vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u8m8_b1_m(vm, vs2, vs1, vl); } -vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u8m8_b1_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, 
vuint16m2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsleu_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu_vx_u32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsleu_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsleu_vv_u32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu_vx_u32m2_b16_m(mask, op1, op2, vl); 
-vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32m2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u32m4_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32m4_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u32m8_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32m8_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m1_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m1_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m4_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m4_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m8_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m8_b8_m(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vmsleu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmslt.c b/auto-generated/gnu-api-tests/vmslt.c
index a48b6413f..5d8b7a6a5 100644
--- a/auto-generated/gnu-api-tests/vmslt.c
+++ b/auto-generated/gnu-api-tests/vmslt.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m4_b2(vs2, vs1, vl);
 }
-vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m2_b8(op1, op2, vl);
+vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m4_b4(op1, op2, vl);
+vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m8_b2(op1, op2, vl);
+vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32mf2_b64(op1, op2, vl);
+vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m1_b32(op1, op2, vl);
+vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m2_b16(op1, op2, vl);
+vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) {
  + return __riscv_vmslt_vx_i32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m4_b8(op1, op2, vl);
+vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m8_b4(op1, op2, vl);
+vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32m8_b4(vs2, rs1, vl);
 }
 
-vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m1_b64(op1, op2, vl);
+vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m2_b32(op1, op2, vl);
+vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m4_b16(op1, op2, vl);
+vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m8_b8(op1, op2, vl);
+vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf8_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf8_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf4_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf4_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m1_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m1_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m2_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
  + return __riscv_vmslt_vx_i8m2_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m4_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m4_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m8_b1_m(vm, vs2, vs1, vl);
 }
 
-vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m8_b1_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16mf4_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16mf4_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16mf2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16mf2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m1_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m1_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m2_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
  + return __riscv_vmslt_vx_i16m2_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m4_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m4_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m8_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m8_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32mf2_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32mf2_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m1_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32m1_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
  + return __riscv_vmslt_vx_i32m2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m4_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32m4_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32m8_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
  + return __riscv_vmslt_vx_i32m8_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m1_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i64m1_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
  + return __riscv_vmslt_vx_i64m2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m4_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i64m4_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i64m8_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i64m8_b8_m(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmsltu.c b/auto-generated/gnu-api-tests/vmsltu.c
index d87cdc3ad..b71995a0c 100644
--- a/auto-generated/gnu-api-tests/vmsltu.c
+++ b/auto-generated/gnu-api-tests/vmsltu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m1_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m2_b4(op1, op2, vl);
+vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m4_b2(op1, op2, vl);
+vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m8_b1(op1, op2, vl);
+vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m1_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m1_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m2_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m2_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m2_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m4_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m4_b4(op1, op2, vl);
+vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m4_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m8_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m8_b2(op1, op2, vl);
+vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m8_b2(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32mf2_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32mf2_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32mf2_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m1_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m1_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32m1_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m2_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32m2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m4_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m4_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32m4_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m8_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m8_b4(op1, op2, vl);
+vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32m8_b4(vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m1_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m1_b64(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u64m1_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m2_b32(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u64m2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m4_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m4_b16(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u64m4_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m8_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m8_b8(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u64m8_b8(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8mf8_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8mf8_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8mf8_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8mf4_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8mf4_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8mf4_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8mf2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8mf2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8mf2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m1_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m1_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m1_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m2_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m2_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m4_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m4_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u8m8_b1_m(vm, vs2, vs1, vl);
 }
 
-vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u8m8_b1_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16mf4_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16mf4_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16mf4_b64_m(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16mf2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16mf2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16mf2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m1_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m1_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m1_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m2_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m2_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m2_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m4_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m4_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u16m8_b2_m(vm, vs2, vs1, vl);
 }
 
-vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u16m8_b2_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32mf2_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32mf2_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32mf2_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m1_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32m1_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m2_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  + return __riscv_vmsltu_vx_u32m2_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m4_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u32m4_b8_m(vm, vs2, rs1, vl);
 }
 
-vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u32m8_b4_m(vm, vs2, vs1, vl);
 }
 
-vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  + return __riscv_vmsltu_vx_u32m8_b4_m(vm, vs2, rs1, vl);
 }
 
-vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m1_b64_m(vm, vs2, vs1, vl);
 }
 
-vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u64m1_b64_m(vm, vs2, rs1, vl);
 }
 
-vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m2_b32_m(vm, vs2, vs1, vl);
 }
 
-vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u64m2_b32_m(vm, vs2, rs1, vl);
 }
 
-vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m4_b16_m(vm, vs2, vs1, vl);
 }
 
-vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  + return __riscv_vmsltu_vx_u64m4_b16_m(vm, vs2, rs1, vl);
 }
 
-vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsltu_vv_u64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_vv_u64m8_b8_m(vm, vs2, vs1, vl);
 }
 
-vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_vx_u64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_vx_u64m8_b8_m(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmsne.c b/auto-generated/gnu-api-tests/vmsne.c
index 9ab6945fe..96bc6a634 100644
--- a/auto-generated/gnu-api-tests/vmsne.c
+++ b/auto-generated/gnu-api-tests/vmsne.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i8mf8_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i8mf8_b64(op1, op2, vl);
+vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i8mf8_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i8mf4_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i8mf4_b32(op1, op2, vl);
+vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i8mf4_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i8mf2_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i8mf2_b16(op1, op2, vl);
+vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i8mf2_b16(vs2, rs1, vl);
 }
 
-vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i8m1_b8(vs2, vs1, vl);
 }
 
-vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i8m1_b8(op1, op2, vl);
+vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i8m1_b8(vs2, rs1, vl);
 }
 
-vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i8m2_b4(vs2, vs1, vl);
 }
 
-vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i8m2_b4(op1, op2, vl);
+vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i8m2_b4(vs2, rs1, vl);
 }
 
-vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i8m4_b2(vs2, vs1, vl);
 }
 
-vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i8m4_b2(op1, op2, vl);
+vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i8m4_b2(vs2, rs1, vl);
 }
 
-vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i8m8_b1(vs2, vs1, vl);
 }
 
-vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i8m8_b1(op1, op2, vl);
+vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i8m8_b1(vs2, rs1, vl);
 }
 
-vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i16mf4_b64(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i16mf4_b64(op1, op2, vl);
+vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i16mf4_b64(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i16mf2_b32(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne_vx_i16mf2_b32(op1, op2, vl);
+vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne_vx_i16mf2_b32(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsne_vv_i16m1_b16(op1, op2, vl);
+vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsne_vv_i16m1_b16(vs2, vs1, vl);
 }
 
-vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
vl); +vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m2_b8(op1, op2, vl); +vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m2_b8(op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m4_b4(op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m4_b4(op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m8_b2(op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m8_b2(op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32mf2_b64(op1, op2, vl); +vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m1_b32(op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m1_b32(op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m1_b32(vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m2_b16(op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m2_b16(op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m4_b8(op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, 
int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m4_b8(op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m8_b4(op1, op2, vl); +vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m8_b4(op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m1_b64(op1, op2, vl); +vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m1_b64(op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m2_b32(op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m2_b32(op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m4_b16(op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m4_b16(op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m8_b8(op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m8_b8(op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf8_b64(op1, op2, vl); +vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf8_b64(vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf8_b64(op1, op2, vl); +vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf8_b64(vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf4_b32(op1, op2, vl); +vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return 
__riscv_vmsne_vv_u8mf4_b32(vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf4_b32(op1, op2, vl); +vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf4_b32(vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf2_b16(op1, op2, vl); +vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf2_b16(vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf2_b16(op1, op2, vl); +vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf2_b16(vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m1_b8(op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m1_b8(vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m1_b8(op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m1_b8(vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m2_b4(op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m2_b4(vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m2_b4(op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m2_b4(vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m4_b2(op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m4_b2(vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m4_b2(op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m4_b2(vs2, rs1, vl); } -vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m8_b1(op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m8_b1(vs2, vs1, vl); } -vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m8_b1(op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m8_b1(vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_u16mf4_b64(op1, op2, vl); +vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16mf4_b64(vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16mf4_b64(op1, op2, vl); +vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16mf4_b64(vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u16mf2_b32(op1, op2, vl); +vbool32_t 
test_vmsne_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16mf2_b32(vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16mf2_b32(op1, op2, vl); +vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16mf2_b32(vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m1_b16(op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m1_b16(vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m1_b16(op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m1_b16(vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m2_b8(op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m2_b8(vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m2_b8(op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m2_b8(vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m4_b4(op1, op2, vl); +vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m4_b4(vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m4_b4(op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m4_b4(vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m8_b2(op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m8_b2(vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m8_b2(op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m8_b2(vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u32mf2_b64(op1, op2, vl); +vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32mf2_b64(vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32mf2_b64(op1, op2, vl); +vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32mf2_b64(vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m1_b32(op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m1_b32(vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m1_b32(op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m1_b32(vs2, rs1, vl); } 
-vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m2_b16(op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m2_b16(vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m2_b16(op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m2_b16(vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m4_b8(op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m4_b8(vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m4_b8(op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m4_b8(vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m8_b4(op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m8_b4(vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m8_b4(op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m8_b4(vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m1_b64(op1, op2, vl); +vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m1_b64(vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m1_b64(op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m1_b64(vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m2_b32(op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m2_b32(vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m2_b32(op1, op2, vl); +vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m2_b32(vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m4_b16(op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m4_b16(vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m4_b16(op1, op2, vl); +vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m4_b16(vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m8_b8(op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m8_b8(vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m8_b8(op1, op2, vl); +vbool8_t 
test_vmsne_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m8_b8(vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsne_vv_i8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8mf8_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8mf8_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_i8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8mf4_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8mf4_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_i8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8mf2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8mf2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8m1_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m1_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8m2_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m2_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8m4_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return 
__riscv_vmsne_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m4_b2_m(vm, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8m8_b1_m(vm, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m8_b1_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_i16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_i16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m4_b4_m(vm, vs2, 
vs1, vl); } -vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t 
test_vmsne_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m8_b8_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf8_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf8_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf8_b64_m(vm, vs2, rs1, vl); } -vbool32_t 
test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf4_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf4_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf4_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m1_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m1_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m2_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m2_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m4_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m4_b2_m(vm, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m8_b1_m(vm, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) 
{ + return __riscv_vmsne_vx_u8m8_b1_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_u16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16mf4_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16mf4_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16mf4_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16mf2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16mf2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16mf2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m1_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m1_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m2_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m2_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m4_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m4_b4_m(vm, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m8_b2_m(vm, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t 
op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m8_b2_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32mf2_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32mf2_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32mf2_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m1_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m1_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m2_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m2_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m4_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m4_b8_m(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m8_b4_m(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m8_b4_m(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t 
test_vmsne_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m1_b64_m(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m1_b64_m(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m2_b32_m(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m2_b32_m(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m4_b16_m(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m4_b16_m(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m8_b8_m(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m8_b8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsne\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-api-tests/vmsof.c b/auto-generated/gnu-api-tests/vmsof.c index bf2f6c35c..675a7f92a 100644 --- a/auto-generated/gnu-api-tests/vmsof.c +++ b/auto-generated/gnu-api-tests/vmsof.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmsof_m_b1(op1, vl); +vbool1_t test_vmsof_m_b1(vbool1_t vs2, size_t vl) { + return __riscv_vmsof_m_b1(vs2, vl); } -vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmsof_m_b2(op1, vl); +vbool2_t test_vmsof_m_b2(vbool2_t vs2, size_t vl) { + return __riscv_vmsof_m_b2(vs2, vl); } -vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vmsof_m_b4(op1, vl); +vbool4_t test_vmsof_m_b4(vbool4_t vs2, size_t vl) { + return __riscv_vmsof_m_b4(vs2, vl); } -vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmsof_m_b8(op1, vl); +vbool8_t test_vmsof_m_b8(vbool8_t vs2, size_t vl) { + return __riscv_vmsof_m_b8(vs2, vl); } -vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmsof_m_b16(op1, vl); +vbool16_t 
test_vmsof_m_b16(vbool16_t vs2, size_t vl) { + return __riscv_vmsof_m_b16(vs2, vl); } -vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmsof_m_b32(op1, vl); +vbool32_t test_vmsof_m_b32(vbool32_t vs2, size_t vl) { + return __riscv_vmsof_m_b32(vs2, vl); } -vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmsof_m_b64(op1, vl); +vbool64_t test_vmsof_m_b64(vbool64_t vs2, size_t vl) { + return __riscv_vmsof_m_b64(vs2, vl); } -vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vmsof_m_b1_m(mask, op1, vl); +vbool1_t test_vmsof_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { + return __riscv_vmsof_m_b1_m(vm, vs2, vl); } -vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vmsof_m_b2_m(mask, op1, vl); +vbool2_t test_vmsof_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { + return __riscv_vmsof_m_b2_m(vm, vs2, vl); } -vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vmsof_m_b4_m(mask, op1, vl); +vbool4_t test_vmsof_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_vmsof_m_b4_m(vm, vs2, vl); } -vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_vmsof_m_b8_m(mask, op1, vl); +vbool8_t test_vmsof_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_vmsof_m_b8_m(vm, vs2, vl); } -vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_vmsof_m_b16_m(mask, op1, vl); +vbool16_t test_vmsof_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_vmsof_m_b16_m(vm, vs2, vl); } -vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_vmsof_m_b32_m(mask, op1, vl); +vbool32_t test_vmsof_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_vmsof_m_b32_m(vm, vs2, vl); } -vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_vmsof_m_b64_m(mask, op1, vl); +vbool64_t test_vmsof_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_vmsof_m_b64_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsof\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-api-tests/vmul.c b/auto-generated/gnu-api-tests/vmul.c index 8f6fd01f1..3dbb1eb52 100644 --- a/auto-generated/gnu-api-tests/vmul.c +++ b/auto-generated/gnu-api-tests/vmul.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t 
test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_i8m1(op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m1(op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_i8m2(op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m2(op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_i8m4(op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m4(op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_i8m8(op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m8(op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_vv_i16m1(op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return 
__riscv_vmul_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m1(op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_i16m2(op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m2(op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul_vv_i16m4(op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m4(op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_i16m8(op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m8(op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_i32m1(op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m1(op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_i32m2(op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m2(op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_i32m4(op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return 
__riscv_vmul_vx_i32m4(op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_i32m8(op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m8(op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_i64m1(op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m1(op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_i64m2(op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m2(op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_i64m4(op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m4(op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_i64m8(op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m8(op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf8(op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf8(op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf4(op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf4(op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vmul_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf2(op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf2(op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_u8m1(op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m1(op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_u8m2(op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m2(op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_u8m4(op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m4(op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_u8m8(op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t 
vl) { - return __riscv_vmul_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_u32m4(op1, op2, 
vl); +vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m8(vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return 
__riscv_vmul_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return 
__riscv_vmul_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32mf2_m(vm, vs2, vs1, 
vl); } -vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, 
vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m8_m(vm, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - 
return __riscv_vmul_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vmul_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t 
vl) { - return __riscv_vmul_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t 
vl) {
-  return __riscv_vmul_vx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vmul\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vmulh.c b/auto-generated/gnu-api-tests/vmulh.c
index 36e19529f..0caab3995 100644
--- a/auto-generated/gnu-api-tests/vmulh.c
+++ b/auto-generated/gnu-api-tests/vmulh.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8mf8(op1, op2, vl);
+vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8mf8(vs2, vs1, vl);
 }
-vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8mf8(op1, op2, vl);
+vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8mf8(vs2, rs1, vl);
 }
-vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8mf4(op1, op2, vl);
+vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8mf4(vs2, vs1, vl);
 }
-vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8mf4(op1, op2, vl);
+vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8mf4(vs2, rs1, vl);
 }
-vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8mf2(op1, op2, vl);
+vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8mf2(vs2, vs1, vl);
 }
-vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8mf2(op1, op2, vl);
+vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8mf2(vs2, rs1, vl);
 }
-vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8m1(op1, op2, vl);
+vint8m1_t test_vmulh_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8m1(vs2, vs1, vl);
 }
-vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8m1(op1, op2, vl);
+vint8m1_t test_vmulh_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8m1(vs2, rs1, vl);
 }
-vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8m2(op1, op2, vl);
+vint8m2_t test_vmulh_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8m2(vs2, vs1, vl);
 }
-vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8m2(op1, op2, vl);
+vint8m2_t test_vmulh_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8m2(vs2, rs1, vl);
 }
-vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8m4(op1, op2, vl);
+vint8m4_t test_vmulh_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8m4(vs2, vs1, vl);
 }
-vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8m4(op1, op2, vl);
+vint8m4_t test_vmulh_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8m4(vs2, rs1, vl);
 }
-vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl)
{ - return __riscv_vmulh_vv_i8m8(op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m8(op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m1(op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m1(op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m2(op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m2(op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m4(op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m4(op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m8(op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m8(op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32mf2(op1, op2, vl); +vint32mf2_t 
test_vmulh_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m1(op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m1(op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m2(op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m2(op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m4(op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m4(op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m8(op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m8(op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m1(op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m1(op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m2(op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m2(op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m4(op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return 
__riscv_vmulh_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m4(op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m8(op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m8(op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m8(vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t 
test_vmulh_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t vm, 
vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t vm, 
vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulh\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmulhsu.c b/auto-generated/gnu-api-tests/vmulhsu.c
index e8dc6c5a2..8272d236e 100644
--- a/auto-generated/gnu-api-tests/vmulhsu.c
+++ b/auto-generated/gnu-api-tests/vmulhsu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t
op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m1(op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m1(op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m2(op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m2(op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m4(op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m4(op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m8(op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m8(op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - 
return __riscv_vmulhsu_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m1(op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m1(op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m2(op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m2(op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m4(op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m4(op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m8(op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m8(op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32mf2(vs2, 
rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m1(op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m1(op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m2(op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m2(op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m4(op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m4(op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m8(op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m8(op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m1(op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m1(op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m2(op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m2(op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m4(op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m4(op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t vs2, uint64_t rs1, size_t 
vl) { + return __riscv_vmulhsu_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m8(op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m8(op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m8(vs2, rs1, vl); } -vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m2_m(vm, vs2, 
rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t 
vm, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, 
size_t vl) {
-  return __riscv_vmulhsu_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_vx_i64m8_m(vm, vs2, rs1, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhsu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmulhu.c b/auto-generated/gnu-api-tests/vmulhu.c
index e483ee5b1..6d530c884 100644
--- a/auto-generated/gnu-api-tests/vmulhu.c
+++ b/auto-generated/gnu-api-tests/vmulhu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmulhu_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulhu_vv_u8mf8(vs2, vs1, vl);
 }
-vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_vx_u8mf8(vs2, rs1, vl);
 }
-vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmulhu_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulhu_vv_u8mf4(vs2, vs1, vl);
 }
-vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_vx_u8mf4(vs2, rs1, vl);
 }
-vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmulhu_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulhu_vv_u8mf2(vs2, vs1, vl);
 }
-vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_vx_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_vx_u8mf2(vs2, rs1, vl);
 }
-vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmulhu_vv_u8m1(op1, op2, vl);
+vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmulhu_vv_u8m1(vs2, vs1, vl);
 }
-vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_vx_u8m1(op1, op2, vl);
+vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_vx_u8m1(vs2, rs1, vl);
 }
-vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmulhu_vv_u8m2(op1, op2, vl);
+vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmulhu_vv_u8m2(vs2, vs1, vl);
 }
-vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_vx_u8m2(op1, op2, vl);
+vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_vx_u8m2(vs2, rs1, vl);
 }
-vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmulhu_vv_u8m4(op1, op2, vl);
+vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmulhu_vv_u8m4(vs2, vs1, vl);
 }
-vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_vx_u8m4(op1, op2, vl);
+vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_vx_u8m4(vs2, rs1, vl);
 }
-vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmulhu_vv_u8m8(op1, op2, vl);
+vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmulhu_vv_u8m8(vs2, vs1, vl);
 }
-vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return
__riscv_vmulhu_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32mf2(vs2, vs1, vl); } 
-vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) 
{ + return __riscv_vmulhu_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t 
test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, 
size_t vl) { + return __riscv_vmulhu_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return 
__riscv_vmulhu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final 
{ scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vmv.c b/auto-generated/gnu-api-tests/vmv.c
index 288da82e3..4a4f509f2 100644
--- a/auto-generated/gnu-api-tests/vmv.c
+++ b/auto-generated/gnu-api-tests/vmv.c
@@ -6,768 +6,768 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
-  return __riscv_vmv_v_v_i8mf8(src, vl);
+vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i8mf8(vs1, vl);
 }
-vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) {
-  return __riscv_vmv_v_x_i8mf8(src, vl);
+vint8mf8_t test_vmv_v_x_i8mf8(int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_x_i8mf8(rs1, vl);
 }
-vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
-  return __riscv_vmv_v_v_i8mf4(src, vl);
+vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i8mf4(vs1, vl);
 }
-vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) {
-  return __riscv_vmv_v_x_i8mf4(src, vl);
+vint8mf4_t test_vmv_v_x_i8mf4(int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_x_i8mf4(rs1, vl);
 }
-vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
-  return __riscv_vmv_v_v_i8mf2(src, vl);
+vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i8mf2(vs1, vl);
 }
-vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) {
-  return __riscv_vmv_v_x_i8mf2(src, vl);
+vint8mf2_t test_vmv_v_x_i8mf2(int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_x_i8mf2(rs1, vl);
 }
-vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
-  return __riscv_vmv_v_v_i8m1(src, vl);
+vint8m1_t test_vmv_v_v_i8m1(vint8m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i8m1(vs1, vl);
 }
-vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) {
-  return __riscv_vmv_v_x_i8m1(src, vl);
+vint8m1_t test_vmv_v_x_i8m1(int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_x_i8m1(rs1, vl);
 }
-vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
-  return __riscv_vmv_v_v_i8m2(src, vl);
+vint8m2_t test_vmv_v_v_i8m2(vint8m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i8m2(vs1, vl);
 }
-vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) {
-  return __riscv_vmv_v_x_i8m2(src, vl);
+vint8m2_t test_vmv_v_x_i8m2(int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_x_i8m2(rs1, vl);
 }
-vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
-  return __riscv_vmv_v_v_i8m4(src, vl);
+vint8m4_t test_vmv_v_v_i8m4(vint8m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i8m4(vs1, vl);
 }
-vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) {
-  return __riscv_vmv_v_x_i8m4(src, vl);
+vint8m4_t test_vmv_v_x_i8m4(int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_x_i8m4(rs1, vl);
 }
-vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
-  return __riscv_vmv_v_v_i8m8(src, vl);
+vint8m8_t test_vmv_v_v_i8m8(vint8m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i8m8(vs1, vl);
 }
-vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) {
-  return __riscv_vmv_v_x_i8m8(src, vl);
+vint8m8_t test_vmv_v_x_i8m8(int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_x_i8m8(rs1, vl);
 }
-vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
-  return __riscv_vmv_v_v_i16mf4(src, vl);
+vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmv_v_v_i16mf4(vs1, vl);
 }
-vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) {
-  return __riscv_vmv_v_x_i16mf4(src, vl);
+vint16mf4_t test_vmv_v_x_i16mf4(int16_t rs1, size_t vl) {
+ return __riscv_vmv_v_x_i16mf4(rs1, vl); } -vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) { - return __riscv_vmv_v_v_i16mf2(src, vl); +vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16mf2(vs1, vl); } -vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16mf2(src, vl); +vint16mf2_t test_vmv_v_x_i16mf2(int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16mf2(rs1, vl); } -vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) { - return __riscv_vmv_v_v_i16m1(src, vl); +vint16m1_t test_vmv_v_v_i16m1(vint16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m1(vs1, vl); } -vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m1(src, vl); +vint16m1_t test_vmv_v_x_i16m1(int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m1(rs1, vl); } -vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) { - return __riscv_vmv_v_v_i16m2(src, vl); +vint16m2_t test_vmv_v_v_i16m2(vint16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m2(vs1, vl); } -vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m2(src, vl); +vint16m2_t test_vmv_v_x_i16m2(int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m2(rs1, vl); } -vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) { - return __riscv_vmv_v_v_i16m4(src, vl); +vint16m4_t test_vmv_v_v_i16m4(vint16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m4(vs1, vl); } -vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m4(src, vl); +vint16m4_t test_vmv_v_x_i16m4(int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m4(rs1, vl); } -vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) { - return __riscv_vmv_v_v_i16m8(src, vl); +vint16m8_t test_vmv_v_v_i16m8(vint16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m8(vs1, vl); } -vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m8(src, vl); +vint16m8_t test_vmv_v_x_i16m8(int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m8(rs1, vl); } -vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) { - return __riscv_vmv_v_v_i32mf2(src, vl); +vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32mf2(vs1, vl); } -vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32mf2(src, vl); +vint32mf2_t test_vmv_v_x_i32mf2(int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32mf2(rs1, vl); } -vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) { - return __riscv_vmv_v_v_i32m1(src, vl); +vint32m1_t test_vmv_v_v_i32m1(vint32m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32m1(vs1, vl); } -vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32m1(src, vl); +vint32m1_t test_vmv_v_x_i32m1(int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m1(rs1, vl); } -vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) { - return __riscv_vmv_v_v_i32m2(src, vl); +vint32m2_t test_vmv_v_v_i32m2(vint32m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32m2(vs1, vl); } -vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32m2(src, vl); +vint32m2_t test_vmv_v_x_i32m2(int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m2(rs1, vl); } -vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) { - return __riscv_vmv_v_v_i32m4(src, vl); +vint32m4_t test_vmv_v_v_i32m4(vint32m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32m4(vs1, vl); } -vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) { - 
return __riscv_vmv_v_x_i32m4(src, vl); +vint32m4_t test_vmv_v_x_i32m4(int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m4(rs1, vl); } -vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) { - return __riscv_vmv_v_v_i32m8(src, vl); +vint32m8_t test_vmv_v_v_i32m8(vint32m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32m8(vs1, vl); } -vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32m8(src, vl); +vint32m8_t test_vmv_v_x_i32m8(int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m8(rs1, vl); } -vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) { - return __riscv_vmv_v_v_i64m1(src, vl); +vint64m1_t test_vmv_v_v_i64m1(vint64m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m1(vs1, vl); } -vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m1(src, vl); +vint64m1_t test_vmv_v_x_i64m1(int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m1(rs1, vl); } -vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) { - return __riscv_vmv_v_v_i64m2(src, vl); +vint64m2_t test_vmv_v_v_i64m2(vint64m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m2(vs1, vl); } -vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m2(src, vl); +vint64m2_t test_vmv_v_x_i64m2(int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m2(rs1, vl); } -vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) { - return __riscv_vmv_v_v_i64m4(src, vl); +vint64m4_t test_vmv_v_v_i64m4(vint64m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m4(vs1, vl); } -vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m4(src, vl); +vint64m4_t test_vmv_v_x_i64m4(int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m4(rs1, vl); } -vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) { - return __riscv_vmv_v_v_i64m8(src, vl); +vint64m8_t test_vmv_v_v_i64m8(vint64m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m8(vs1, vl); } -vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m8(src, vl); +vint64m8_t test_vmv_v_x_i64m8(int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m8(rs1, vl); } -vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) { - return __riscv_vmv_v_v_u8mf8(src, vl); +vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8mf8(vs1, vl); } -vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8mf8(src, vl); +vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8mf8(rs1, vl); } -vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) { - return __riscv_vmv_v_v_u8mf4(src, vl); +vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8mf4(vs1, vl); } -vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8mf4(src, vl); +vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8mf4(rs1, vl); } -vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) { - return __riscv_vmv_v_v_u8mf2(src, vl); +vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8mf2(vs1, vl); } -vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8mf2(src, vl); +vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8mf2(rs1, vl); } -vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) { - return __riscv_vmv_v_v_u8m1(src, vl); +vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t vs1, size_t vl) { + return 
__riscv_vmv_v_v_u8m1(vs1, vl); } -vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m1(src, vl); +vuint8m1_t test_vmv_v_x_u8m1(uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m1(rs1, vl); } -vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) { - return __riscv_vmv_v_v_u8m2(src, vl); +vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8m2(vs1, vl); } -vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m2(src, vl); +vuint8m2_t test_vmv_v_x_u8m2(uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m2(rs1, vl); } -vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) { - return __riscv_vmv_v_v_u8m4(src, vl); +vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8m4(vs1, vl); } -vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m4(src, vl); +vuint8m4_t test_vmv_v_x_u8m4(uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m4(rs1, vl); } -vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) { - return __riscv_vmv_v_v_u8m8(src, vl); +vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8m8(vs1, vl); } -vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m8(src, vl); +vuint8m8_t test_vmv_v_x_u8m8(uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m8(rs1, vl); } -vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) { - return __riscv_vmv_v_v_u16mf4(src, vl); +vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16mf4(vs1, vl); } -vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16mf4(src, vl); +vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16mf4(rs1, vl); } -vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) { - return __riscv_vmv_v_v_u16mf2(src, vl); +vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16mf2(vs1, vl); } -vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16mf2(src, vl); +vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16mf2(rs1, vl); } -vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) { - return __riscv_vmv_v_v_u16m1(src, vl); +vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m1(vs1, vl); } -vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m1(src, vl); +vuint16m1_t test_vmv_v_x_u16m1(uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m1(rs1, vl); } -vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) { - return __riscv_vmv_v_v_u16m2(src, vl); +vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m2(vs1, vl); } -vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m2(src, vl); +vuint16m2_t test_vmv_v_x_u16m2(uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m2(rs1, vl); } -vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) { - return __riscv_vmv_v_v_u16m4(src, vl); +vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m4(vs1, vl); } -vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m4(src, vl); +vuint16m4_t test_vmv_v_x_u16m4(uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m4(rs1, vl); } -vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) { - 
return __riscv_vmv_v_v_u16m8(src, vl); +vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m8(vs1, vl); } -vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m8(src, vl); +vuint16m8_t test_vmv_v_x_u16m8(uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m8(rs1, vl); } -vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) { - return __riscv_vmv_v_v_u32mf2(src, vl); +vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32mf2(vs1, vl); } -vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32mf2(src, vl); +vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32mf2(rs1, vl); } -vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) { - return __riscv_vmv_v_v_u32m1(src, vl); +vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m1(vs1, vl); } -vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m1(src, vl); +vuint32m1_t test_vmv_v_x_u32m1(uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m1(rs1, vl); } -vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) { - return __riscv_vmv_v_v_u32m2(src, vl); +vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m2(vs1, vl); } -vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m2(src, vl); +vuint32m2_t test_vmv_v_x_u32m2(uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m2(rs1, vl); } -vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) { - return __riscv_vmv_v_v_u32m4(src, vl); +vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m4(vs1, vl); } -vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m4(src, vl); +vuint32m4_t test_vmv_v_x_u32m4(uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m4(rs1, vl); } -vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) { - return __riscv_vmv_v_v_u32m8(src, vl); +vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m8(vs1, vl); } -vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m8(src, vl); +vuint32m8_t test_vmv_v_x_u32m8(uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m8(rs1, vl); } -vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) { - return __riscv_vmv_v_v_u64m1(src, vl); +vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m1(vs1, vl); } -vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m1(src, vl); +vuint64m1_t test_vmv_v_x_u64m1(uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m1(rs1, vl); } -vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) { - return __riscv_vmv_v_v_u64m2(src, vl); +vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m2(vs1, vl); } -vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m2(src, vl); +vuint64m2_t test_vmv_v_x_u64m2(uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m2(rs1, vl); } -vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) { - return __riscv_vmv_v_v_u64m4(src, vl); +vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m4(vs1, vl); } -vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m4(src, vl); +vuint64m4_t 
test_vmv_v_x_u64m4(uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m4(rs1, vl); } -vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) { - return __riscv_vmv_v_v_u64m8(src, vl); +vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m8(vs1, vl); } -vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m8(src, vl); +vuint64m8_t test_vmv_v_x_u64m8(uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m8(rs1, vl); } -vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vmv_v_v_f16mf4(src, vl); +vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16mf4(vs1, vl); } -vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vmv_v_v_f16mf2(src, vl); +vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16mf2(vs1, vl); } -vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vmv_v_v_f16m1(src, vl); +vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m1(vs1, vl); } -vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vmv_v_v_f16m2(src, vl); +vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m2(vs1, vl); } -vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vmv_v_v_f16m4(src, vl); +vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m4(vs1, vl); } -vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vmv_v_v_f16m8(src, vl); +vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m8(vs1, vl); } -vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vmv_v_v_f32mf2(src, vl); +vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32mf2(vs1, vl); } -vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vmv_v_v_f32m1(src, vl); +vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m1(vs1, vl); } -vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vmv_v_v_f32m2(src, vl); +vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m2(vs1, vl); } -vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vmv_v_v_f32m4(src, vl); +vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m4(vs1, vl); } -vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vmv_v_v_f32m8(src, vl); +vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m8(vs1, vl); } -vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vmv_v_v_f64m1(src, vl); +vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m1(vs1, vl); } -vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vmv_v_v_f64m2(src, vl); +vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m2(vs1, vl); } -vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vmv_v_v_f64m4(src, vl); +vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m4(vs1, vl); } -vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) { 
- return __riscv_vmv_v_v_f64m8(src, vl); +vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m8(vs1, vl); } -int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { - return __riscv_vmv_x_s_i8mf8_i8(src); +int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t vs1) { + return __riscv_vmv_x_s_i8mf8_i8(vs1); } -vint8mf8_t test_vmv_s_x_i8mf8(int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8mf8(src, vl); +vint8mf8_t test_vmv_s_x_i8mf8(int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8mf8(rs1, vl); } -int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { - return __riscv_vmv_x_s_i8mf4_i8(src); +int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t vs1) { + return __riscv_vmv_x_s_i8mf4_i8(vs1); } -vint8mf4_t test_vmv_s_x_i8mf4(int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8mf4(src, vl); +vint8mf4_t test_vmv_s_x_i8mf4(int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8mf4(rs1, vl); } -int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { - return __riscv_vmv_x_s_i8mf2_i8(src); +int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t vs1) { + return __riscv_vmv_x_s_i8mf2_i8(vs1); } -vint8mf2_t test_vmv_s_x_i8mf2(int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8mf2(src, vl); +vint8mf2_t test_vmv_s_x_i8mf2(int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8mf2(rs1, vl); } -int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { - return __riscv_vmv_x_s_i8m1_i8(src); +int8_t test_vmv_x_s_i8m1_i8(vint8m1_t vs1) { + return __riscv_vmv_x_s_i8m1_i8(vs1); } -vint8m1_t test_vmv_s_x_i8m1(int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m1(src, vl); +vint8m1_t test_vmv_s_x_i8m1(int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m1(rs1, vl); } -int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { - return __riscv_vmv_x_s_i8m2_i8(src); +int8_t test_vmv_x_s_i8m2_i8(vint8m2_t vs1) { + return __riscv_vmv_x_s_i8m2_i8(vs1); } -vint8m2_t test_vmv_s_x_i8m2(int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m2(src, vl); +vint8m2_t test_vmv_s_x_i8m2(int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m2(rs1, vl); } -int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { - return __riscv_vmv_x_s_i8m4_i8(src); +int8_t test_vmv_x_s_i8m4_i8(vint8m4_t vs1) { + return __riscv_vmv_x_s_i8m4_i8(vs1); } -vint8m4_t test_vmv_s_x_i8m4(int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m4(src, vl); +vint8m4_t test_vmv_s_x_i8m4(int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m4(rs1, vl); } -int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { - return __riscv_vmv_x_s_i8m8_i8(src); +int8_t test_vmv_x_s_i8m8_i8(vint8m8_t vs1) { + return __riscv_vmv_x_s_i8m8_i8(vs1); } -vint8m8_t test_vmv_s_x_i8m8(int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m8(src, vl); +vint8m8_t test_vmv_s_x_i8m8(int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m8(rs1, vl); } -int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { - return __riscv_vmv_x_s_i16mf4_i16(src); +int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t vs1) { + return __riscv_vmv_x_s_i16mf4_i16(vs1); } -vint16mf4_t test_vmv_s_x_i16mf4(int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16mf4(src, vl); +vint16mf4_t test_vmv_s_x_i16mf4(int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16mf4(rs1, vl); } -int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { - return __riscv_vmv_x_s_i16mf2_i16(src); +int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t vs1) { + return __riscv_vmv_x_s_i16mf2_i16(vs1); } -vint16mf2_t test_vmv_s_x_i16mf2(int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16mf2(src, vl); +vint16mf2_t test_vmv_s_x_i16mf2(int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16mf2(rs1, vl); } -int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) 
{ - return __riscv_vmv_x_s_i16m1_i16(src); +int16_t test_vmv_x_s_i16m1_i16(vint16m1_t vs1) { + return __riscv_vmv_x_s_i16m1_i16(vs1); } -vint16m1_t test_vmv_s_x_i16m1(int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m1(src, vl); +vint16m1_t test_vmv_s_x_i16m1(int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m1(rs1, vl); } -int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { - return __riscv_vmv_x_s_i16m2_i16(src); +int16_t test_vmv_x_s_i16m2_i16(vint16m2_t vs1) { + return __riscv_vmv_x_s_i16m2_i16(vs1); } -vint16m2_t test_vmv_s_x_i16m2(int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m2(src, vl); +vint16m2_t test_vmv_s_x_i16m2(int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m2(rs1, vl); } -int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { - return __riscv_vmv_x_s_i16m4_i16(src); +int16_t test_vmv_x_s_i16m4_i16(vint16m4_t vs1) { + return __riscv_vmv_x_s_i16m4_i16(vs1); } -vint16m4_t test_vmv_s_x_i16m4(int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m4(src, vl); +vint16m4_t test_vmv_s_x_i16m4(int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m4(rs1, vl); } -int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { - return __riscv_vmv_x_s_i16m8_i16(src); +int16_t test_vmv_x_s_i16m8_i16(vint16m8_t vs1) { + return __riscv_vmv_x_s_i16m8_i16(vs1); } -vint16m8_t test_vmv_s_x_i16m8(int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m8(src, vl); +vint16m8_t test_vmv_s_x_i16m8(int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m8(rs1, vl); } -int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { - return __riscv_vmv_x_s_i32mf2_i32(src); +int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t vs1) { + return __riscv_vmv_x_s_i32mf2_i32(vs1); } -vint32mf2_t test_vmv_s_x_i32mf2(int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32mf2(src, vl); +vint32mf2_t test_vmv_s_x_i32mf2(int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32mf2(rs1, vl); } -int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { - return __riscv_vmv_x_s_i32m1_i32(src); +int32_t test_vmv_x_s_i32m1_i32(vint32m1_t vs1) { + return __riscv_vmv_x_s_i32m1_i32(vs1); } -vint32m1_t test_vmv_s_x_i32m1(int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m1(src, vl); +vint32m1_t test_vmv_s_x_i32m1(int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m1(rs1, vl); } -int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { - return __riscv_vmv_x_s_i32m2_i32(src); +int32_t test_vmv_x_s_i32m2_i32(vint32m2_t vs1) { + return __riscv_vmv_x_s_i32m2_i32(vs1); } -vint32m2_t test_vmv_s_x_i32m2(int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m2(src, vl); +vint32m2_t test_vmv_s_x_i32m2(int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m2(rs1, vl); } -int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { - return __riscv_vmv_x_s_i32m4_i32(src); +int32_t test_vmv_x_s_i32m4_i32(vint32m4_t vs1) { + return __riscv_vmv_x_s_i32m4_i32(vs1); } -vint32m4_t test_vmv_s_x_i32m4(int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m4(src, vl); +vint32m4_t test_vmv_s_x_i32m4(int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m4(rs1, vl); } -int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { - return __riscv_vmv_x_s_i32m8_i32(src); +int32_t test_vmv_x_s_i32m8_i32(vint32m8_t vs1) { + return __riscv_vmv_x_s_i32m8_i32(vs1); } -vint32m8_t test_vmv_s_x_i32m8(int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m8(src, vl); +vint32m8_t test_vmv_s_x_i32m8(int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m8(rs1, vl); } -int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { - return __riscv_vmv_x_s_i64m1_i64(src); +int64_t test_vmv_x_s_i64m1_i64(vint64m1_t vs1) { + return 
__riscv_vmv_x_s_i64m1_i64(vs1); } -vint64m1_t test_vmv_s_x_i64m1(int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m1(src, vl); +vint64m1_t test_vmv_s_x_i64m1(int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m1(rs1, vl); } -int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { - return __riscv_vmv_x_s_i64m2_i64(src); +int64_t test_vmv_x_s_i64m2_i64(vint64m2_t vs1) { + return __riscv_vmv_x_s_i64m2_i64(vs1); } -vint64m2_t test_vmv_s_x_i64m2(int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m2(src, vl); +vint64m2_t test_vmv_s_x_i64m2(int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m2(rs1, vl); } -int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { - return __riscv_vmv_x_s_i64m4_i64(src); +int64_t test_vmv_x_s_i64m4_i64(vint64m4_t vs1) { + return __riscv_vmv_x_s_i64m4_i64(vs1); } -vint64m4_t test_vmv_s_x_i64m4(int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m4(src, vl); +vint64m4_t test_vmv_s_x_i64m4(int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m4(rs1, vl); } -int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { - return __riscv_vmv_x_s_i64m8_i64(src); +int64_t test_vmv_x_s_i64m8_i64(vint64m8_t vs1) { + return __riscv_vmv_x_s_i64m8_i64(vs1); } -vint64m8_t test_vmv_s_x_i64m8(int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m8(src, vl); +vint64m8_t test_vmv_s_x_i64m8(int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m8(rs1, vl); } -uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { - return __riscv_vmv_x_s_u8mf8_u8(src); +uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t vs1) { + return __riscv_vmv_x_s_u8mf8_u8(vs1); } -vuint8mf8_t test_vmv_s_x_u8mf8(uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8mf8(src, vl); +vuint8mf8_t test_vmv_s_x_u8mf8(uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8mf8(rs1, vl); } -uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { - return __riscv_vmv_x_s_u8mf4_u8(src); +uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t vs1) { + return __riscv_vmv_x_s_u8mf4_u8(vs1); } -vuint8mf4_t test_vmv_s_x_u8mf4(uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8mf4(src, vl); +vuint8mf4_t test_vmv_s_x_u8mf4(uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8mf4(rs1, vl); } -uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { - return __riscv_vmv_x_s_u8mf2_u8(src); +uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t vs1) { + return __riscv_vmv_x_s_u8mf2_u8(vs1); } -vuint8mf2_t test_vmv_s_x_u8mf2(uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8mf2(src, vl); +vuint8mf2_t test_vmv_s_x_u8mf2(uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8mf2(rs1, vl); } -uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { - return __riscv_vmv_x_s_u8m1_u8(src); +uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t vs1) { + return __riscv_vmv_x_s_u8m1_u8(vs1); } -vuint8m1_t test_vmv_s_x_u8m1(uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m1(src, vl); +vuint8m1_t test_vmv_s_x_u8m1(uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m1(rs1, vl); } -uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { - return __riscv_vmv_x_s_u8m2_u8(src); +uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t vs1) { + return __riscv_vmv_x_s_u8m2_u8(vs1); } -vuint8m2_t test_vmv_s_x_u8m2(uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m2(src, vl); +vuint8m2_t test_vmv_s_x_u8m2(uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m2(rs1, vl); } -uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { - return __riscv_vmv_x_s_u8m4_u8(src); +uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t vs1) { + return __riscv_vmv_x_s_u8m4_u8(vs1); } -vuint8m4_t test_vmv_s_x_u8m4(uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m4(src, vl); +vuint8m4_t 
test_vmv_s_x_u8m4(uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m4(rs1, vl); } -uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { - return __riscv_vmv_x_s_u8m8_u8(src); +uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t vs1) { + return __riscv_vmv_x_s_u8m8_u8(vs1); } -vuint8m8_t test_vmv_s_x_u8m8(uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m8(src, vl); +vuint8m8_t test_vmv_s_x_u8m8(uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m8(rs1, vl); } -uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { - return __riscv_vmv_x_s_u16mf4_u16(src); +uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t vs1) { + return __riscv_vmv_x_s_u16mf4_u16(vs1); } -vuint16mf4_t test_vmv_s_x_u16mf4(uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16mf4(src, vl); +vuint16mf4_t test_vmv_s_x_u16mf4(uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16mf4(rs1, vl); } -uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { - return __riscv_vmv_x_s_u16mf2_u16(src); +uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t vs1) { + return __riscv_vmv_x_s_u16mf2_u16(vs1); } -vuint16mf2_t test_vmv_s_x_u16mf2(uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16mf2(src, vl); +vuint16mf2_t test_vmv_s_x_u16mf2(uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16mf2(rs1, vl); } -uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { - return __riscv_vmv_x_s_u16m1_u16(src); +uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t vs1) { + return __riscv_vmv_x_s_u16m1_u16(vs1); } -vuint16m1_t test_vmv_s_x_u16m1(uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16m1(src, vl); +vuint16m1_t test_vmv_s_x_u16m1(uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16m1(rs1, vl); } -uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { - return __riscv_vmv_x_s_u16m2_u16(src); +uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t vs1) { + return __riscv_vmv_x_s_u16m2_u16(vs1); } -vuint16m2_t test_vmv_s_x_u16m2(uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16m2(src, vl); +vuint16m2_t test_vmv_s_x_u16m2(uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16m2(rs1, vl); } -uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { - return __riscv_vmv_x_s_u16m4_u16(src); +uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t vs1) { + return __riscv_vmv_x_s_u16m4_u16(vs1); } -vuint16m4_t test_vmv_s_x_u16m4(uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16m4(src, vl); +vuint16m4_t test_vmv_s_x_u16m4(uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16m4(rs1, vl); } -uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { - return __riscv_vmv_x_s_u16m8_u16(src); +uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t vs1) { + return __riscv_vmv_x_s_u16m8_u16(vs1); } -vuint16m8_t test_vmv_s_x_u16m8(uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16m8(src, vl); +vuint16m8_t test_vmv_s_x_u16m8(uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16m8(rs1, vl); } -uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { - return __riscv_vmv_x_s_u32mf2_u32(src); +uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t vs1) { + return __riscv_vmv_x_s_u32mf2_u32(vs1); } -vuint32mf2_t test_vmv_s_x_u32mf2(uint32_t src, size_t vl) { - return __riscv_vmv_s_x_u32mf2(src, vl); +vuint32mf2_t test_vmv_s_x_u32mf2(uint32_t rs1, size_t vl) { + return __riscv_vmv_s_x_u32mf2(rs1, vl); } -uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { - return __riscv_vmv_x_s_u32m1_u32(src); +uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t vs1) { + return __riscv_vmv_x_s_u32m1_u32(vs1); } -vuint32m1_t test_vmv_s_x_u32m1(uint32_t src, size_t vl) { - return __riscv_vmv_s_x_u32m1(src, vl); +vuint32m1_t test_vmv_s_x_u32m1(uint32_t 
rs1, size_t vl) { + return __riscv_vmv_s_x_u32m1(rs1, vl); } -uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { - return __riscv_vmv_x_s_u32m2_u32(src); +uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t vs1) { + return __riscv_vmv_x_s_u32m2_u32(vs1); } -vuint32m2_t test_vmv_s_x_u32m2(uint32_t src, size_t vl) { - return __riscv_vmv_s_x_u32m2(src, vl); +vuint32m2_t test_vmv_s_x_u32m2(uint32_t rs1, size_t vl) { + return __riscv_vmv_s_x_u32m2(rs1, vl); } -uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { - return __riscv_vmv_x_s_u32m4_u32(src); +uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t vs1) { + return __riscv_vmv_x_s_u32m4_u32(vs1); } -vuint32m4_t test_vmv_s_x_u32m4(uint32_t src, size_t vl) { - return __riscv_vmv_s_x_u32m4(src, vl); +vuint32m4_t test_vmv_s_x_u32m4(uint32_t rs1, size_t vl) { + return __riscv_vmv_s_x_u32m4(rs1, vl); } -uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { - return __riscv_vmv_x_s_u32m8_u32(src); +uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t vs1) { + return __riscv_vmv_x_s_u32m8_u32(vs1); } -vuint32m8_t test_vmv_s_x_u32m8(uint32_t src, size_t vl) { - return __riscv_vmv_s_x_u32m8(src, vl); +vuint32m8_t test_vmv_s_x_u32m8(uint32_t rs1, size_t vl) { + return __riscv_vmv_s_x_u32m8(rs1, vl); } -uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) { - return __riscv_vmv_x_s_u64m1_u64(src); +uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t vs1) { + return __riscv_vmv_x_s_u64m1_u64(vs1); } -vuint64m1_t test_vmv_s_x_u64m1(uint64_t src, size_t vl) { - return __riscv_vmv_s_x_u64m1(src, vl); +vuint64m1_t test_vmv_s_x_u64m1(uint64_t rs1, size_t vl) { + return __riscv_vmv_s_x_u64m1(rs1, vl); } -uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { - return __riscv_vmv_x_s_u64m2_u64(src); +uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t vs1) { + return __riscv_vmv_x_s_u64m2_u64(vs1); } -vuint64m2_t test_vmv_s_x_u64m2(uint64_t src, size_t vl) { - return __riscv_vmv_s_x_u64m2(src, vl); +vuint64m2_t test_vmv_s_x_u64m2(uint64_t rs1, size_t vl) { + return __riscv_vmv_s_x_u64m2(rs1, vl); } -uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { - return __riscv_vmv_x_s_u64m4_u64(src); +uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t vs1) { + return __riscv_vmv_x_s_u64m4_u64(vs1); } -vuint64m4_t test_vmv_s_x_u64m4(uint64_t src, size_t vl) { - return __riscv_vmv_s_x_u64m4(src, vl); +vuint64m4_t test_vmv_s_x_u64m4(uint64_t rs1, size_t vl) { + return __riscv_vmv_s_x_u64m4(rs1, vl); } -uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { - return __riscv_vmv_x_s_u64m8_u64(src); +uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t vs1) { + return __riscv_vmv_x_s_u64m8_u64(vs1); } -vuint64m8_t test_vmv_s_x_u64m8(uint64_t src, size_t vl) { - return __riscv_vmv_s_x_u64m8(src, vl); +vuint64m8_t test_vmv_s_x_u64m8(uint64_t rs1, size_t vl) { + return __riscv_vmv_s_x_u64m8(rs1, vl); } /* { dg-final { scan-assembler-times {v[ml][s]*[ve][0-9]*\.[ivxfswum.]+\s+} 218 } } */ diff --git a/auto-generated/gnu-api-tests/vmxnor.c b/auto-generated/gnu-api-tests/vmxnor.c index 97d45bd85..c8088f5c2 100644 --- a/auto-generated/gnu-api-tests/vmxnor.c +++ b/auto-generated/gnu-api-tests/vmxnor.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmxnor_mm_b1(op1, op2, vl); +vbool1_t test_vmxnor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmxnor_mm_b1(vs2, vs1, vl); } -vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmxnor_mm_b2(op1, op2, vl); +vbool2_t 
test_vmxnor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmxnor_mm_b2(vs2, vs1, vl); } -vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmxnor_mm_b4(op1, op2, vl); +vbool4_t test_vmxnor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmxnor_mm_b4(vs2, vs1, vl); } -vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmxnor_mm_b8(op1, op2, vl); +vbool8_t test_vmxnor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmxnor_mm_b8(vs2, vs1, vl); } -vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmxnor_mm_b16(op1, op2, vl); +vbool16_t test_vmxnor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmxnor_mm_b16(vs2, vs1, vl); } -vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmxnor_mm_b32(op1, op2, vl); +vbool32_t test_vmxnor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmxnor_mm_b32(vs2, vs1, vl); } -vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmxnor_mm_b64(op1, op2, vl); +vbool64_t test_vmxnor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmxnor_mm_b64(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmxnor\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-api-tests/vmxor.c b/auto-generated/gnu-api-tests/vmxor.c index 10c3639a7..f11b4c8b2 100644 --- a/auto-generated/gnu-api-tests/vmxor.c +++ b/auto-generated/gnu-api-tests/vmxor.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmxor_mm_b1(op1, op2, vl); +vbool1_t test_vmxor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmxor_mm_b1(vs2, vs1, vl); } -vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmxor_mm_b2(op1, op2, vl); +vbool2_t test_vmxor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmxor_mm_b2(vs2, vs1, vl); } -vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmxor_mm_b4(op1, op2, vl); +vbool4_t test_vmxor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmxor_mm_b4(vs2, vs1, vl); } -vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmxor_mm_b8(op1, op2, vl); +vbool8_t test_vmxor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmxor_mm_b8(vs2, vs1, vl); } -vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmxor_mm_b16(op1, op2, vl); +vbool16_t test_vmxor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmxor_mm_b16(vs2, vs1, vl); } -vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmxor_mm_b32(op1, op2, vl); +vbool32_t test_vmxor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmxor_mm_b32(vs2, vs1, vl); } -vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmxor_mm_b64(op1, op2, vl); +vbool64_t test_vmxor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmxor_mm_b64(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmxor\.[ivxfswum.]+\s+} 7 } } */ diff --git 
a/auto-generated/gnu-api-tests/vnclip.c b/auto-generated/gnu-api-tests/vnclip.c index 896395db1..8c8a71b45 100644 --- a/auto-generated/gnu-api-tests/vnclip.c +++ b/auto-generated/gnu-api-tests/vnclip.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1(vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m1(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m1(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2(vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wx_i8m2(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wv_i8m4(vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { - 
return __riscv_vnclip_wx_i8m4(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wv_i16m1(vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m1(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m1(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wv_i16m2(vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4(vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { - return 
__riscv_vnclip_wx_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wv_i32m1(vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m1(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wx_i32m1(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m1(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wv_i32m2(vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wx_i32m2(vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wv_i32m4(vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wx_i32m4(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t vm, vint16m1_t vs2, 
vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t vm, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t vm, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t vm, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t vm, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, 
size_t vl) { - return __riscv_vnclip_wx_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t vm, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t vm, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t vm, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t vm, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t vm, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t vm, vint64m2_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t vm, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t vm, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclip\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vnclipu.c b/auto-generated/gnu-api-tests/vnclipu.c index 0e243a2f8..542b86a4a 100644 --- a/auto-generated/gnu-api-tests/vnclipu.c +++ b/auto-generated/gnu-api-tests/vnclipu.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2(op1, shift, 
__RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m1(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m1(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m1(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { - return 
__riscv_vnclipu_wx_u16m1(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m1(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m1(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m1(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m2(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m2(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m4(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t 
test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m4(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, 
vl); +vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t vm, vuint32m4_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m4_m(vm, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclipu\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vncvt.c b/auto-generated/gnu-api-tests/vncvt.c index 42e070e79..2bf26242c 100644 --- a/auto-generated/gnu-api-tests/vncvt.c +++ b/auto-generated/gnu-api-tests/vncvt.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf8(src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf8(vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf4(src, vl); +vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf4(vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf2(src, vl); +vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf2(vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m1(src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m1(vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m2(src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m2(vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m4(src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m4(vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf8(src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf8(vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf4(src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf4(vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf2(src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf2(vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m1(src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m1(vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m2(src, vl); +vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m2(vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m4(src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m4(vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf4(src, vl); +vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf4(vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf2(src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t vs2, size_t vl) { + return 
__riscv_vncvt_x_x_w_i16mf2(vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m1(src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m1(vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m2(src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m2(vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m4(src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m4(vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf4(src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf4(vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf2(src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf2(vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m1(src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m1(vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m2(src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m2(vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m4(src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m4(vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32mf2(src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32mf2(vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m1(src, vl); +vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m1(vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m2(src, vl); +vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m2(vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m4(src, vl); +vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m4(vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32mf2(src, vl); +vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32mf2(vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m1(src, vl); +vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m1(vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m2(src, vl); +vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m2(vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m4(src, vl); +vuint32m4_t 
test_vncvt_x_x_w_u32m4(vuint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m4(vs2, vl); } -vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf8_m(mask, src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf8_m(vm, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf4_m(mask, src, vl); +vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf4_m(vm, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf2_m(mask, src, vl); +vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf2_m(vm, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m1_m(mask, src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m1_m(vm, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m2_m(mask, src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t vm, vint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m2_m(vm, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m4_m(mask, src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t vm, vint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m4_m(vm, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf8_m(mask, src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf8_m(vm, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf4_m(mask, src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf4_m(vm, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf2_m(mask, src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf2_m(vm, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m1_m(mask, src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m1_m(vm, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m2_m(mask, src, vl); +vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m2_m(vm, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m4_m(mask, src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m4_m(vm, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf4_m(mask, src, vl); +vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return 
__riscv_vncvt_x_x_w_i16mf4_m(vm, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf2_m(mask, src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf2_m(vm, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m1_m(mask, src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m1_m(vm, vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m2_m(mask, src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m2_m(vm, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m4_m(mask, src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t vm, vint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m4_m(vm, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf4_m(mask, src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf4_m(vm, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf2_m(mask, src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf2_m(vm, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m1_m(mask, src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m1_m(vm, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m2_m(mask, src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m2_m(vm, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m4_m(mask, src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m4_m(vm, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32mf2_m(mask, src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32mf2_m(vm, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m1_m(mask, src, vl); +vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t vm, vint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m1_m(vm, vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m2_m(mask, src, vl); +vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t vm, vint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m2_m(vm, vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m4_m(mask, src, vl); +vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t vm, vint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m4_m(vm, 
vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32mf2_m(mask, src, vl); +vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32mf2_m(vm, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m1_m(mask, src, vl); +vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m1_m(vm, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m2_m(mask, src, vl); +vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m2_m(vm, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m4_m(mask, src, vl); +vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m4_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vncvt\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vneg.c b/auto-generated/gnu-api-tests/vneg.c index e5f6f1411..423253faf 100644 --- a/auto-generated/gnu-api-tests/vneg.c +++ b/auto-generated/gnu-api-tests/vneg.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t op1, size_t vl) { - return __riscv_vneg_v_i8mf8(op1, vl); +vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t vs, size_t vl) { + return __riscv_vneg_v_i8mf8(vs, vl); } -vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t op1, size_t vl) { - return __riscv_vneg_v_i8mf4(op1, vl); +vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t vs, size_t vl) { + return __riscv_vneg_v_i8mf4(vs, vl); } -vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t op1, size_t vl) { - return __riscv_vneg_v_i8mf2(op1, vl); +vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t vs, size_t vl) { + return __riscv_vneg_v_i8mf2(vs, vl); } -vint8m1_t test_vneg_v_i8m1(vint8m1_t op1, size_t vl) { - return __riscv_vneg_v_i8m1(op1, vl); +vint8m1_t test_vneg_v_i8m1(vint8m1_t vs, size_t vl) { + return __riscv_vneg_v_i8m1(vs, vl); } -vint8m2_t test_vneg_v_i8m2(vint8m2_t op1, size_t vl) { - return __riscv_vneg_v_i8m2(op1, vl); +vint8m2_t test_vneg_v_i8m2(vint8m2_t vs, size_t vl) { + return __riscv_vneg_v_i8m2(vs, vl); } -vint8m4_t test_vneg_v_i8m4(vint8m4_t op1, size_t vl) { - return __riscv_vneg_v_i8m4(op1, vl); +vint8m4_t test_vneg_v_i8m4(vint8m4_t vs, size_t vl) { + return __riscv_vneg_v_i8m4(vs, vl); } -vint8m8_t test_vneg_v_i8m8(vint8m8_t op1, size_t vl) { - return __riscv_vneg_v_i8m8(op1, vl); +vint8m8_t test_vneg_v_i8m8(vint8m8_t vs, size_t vl) { + return __riscv_vneg_v_i8m8(vs, vl); } -vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t op1, size_t vl) { - return __riscv_vneg_v_i16mf4(op1, vl); +vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t vs, size_t vl) { + return __riscv_vneg_v_i16mf4(vs, vl); } -vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t op1, size_t vl) { - return __riscv_vneg_v_i16mf2(op1, vl); +vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t vs, size_t vl) { + return __riscv_vneg_v_i16mf2(vs, vl); } -vint16m1_t test_vneg_v_i16m1(vint16m1_t op1, size_t vl) { - return __riscv_vneg_v_i16m1(op1, vl); +vint16m1_t test_vneg_v_i16m1(vint16m1_t vs, size_t vl) { + return __riscv_vneg_v_i16m1(vs, vl); } -vint16m2_t 
test_vneg_v_i16m2(vint16m2_t op1, size_t vl) { - return __riscv_vneg_v_i16m2(op1, vl); +vint16m2_t test_vneg_v_i16m2(vint16m2_t vs, size_t vl) { + return __riscv_vneg_v_i16m2(vs, vl); } -vint16m4_t test_vneg_v_i16m4(vint16m4_t op1, size_t vl) { - return __riscv_vneg_v_i16m4(op1, vl); +vint16m4_t test_vneg_v_i16m4(vint16m4_t vs, size_t vl) { + return __riscv_vneg_v_i16m4(vs, vl); } -vint16m8_t test_vneg_v_i16m8(vint16m8_t op1, size_t vl) { - return __riscv_vneg_v_i16m8(op1, vl); +vint16m8_t test_vneg_v_i16m8(vint16m8_t vs, size_t vl) { + return __riscv_vneg_v_i16m8(vs, vl); } -vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t op1, size_t vl) { - return __riscv_vneg_v_i32mf2(op1, vl); +vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t vs, size_t vl) { + return __riscv_vneg_v_i32mf2(vs, vl); } -vint32m1_t test_vneg_v_i32m1(vint32m1_t op1, size_t vl) { - return __riscv_vneg_v_i32m1(op1, vl); +vint32m1_t test_vneg_v_i32m1(vint32m1_t vs, size_t vl) { + return __riscv_vneg_v_i32m1(vs, vl); } -vint32m2_t test_vneg_v_i32m2(vint32m2_t op1, size_t vl) { - return __riscv_vneg_v_i32m2(op1, vl); +vint32m2_t test_vneg_v_i32m2(vint32m2_t vs, size_t vl) { + return __riscv_vneg_v_i32m2(vs, vl); } -vint32m4_t test_vneg_v_i32m4(vint32m4_t op1, size_t vl) { - return __riscv_vneg_v_i32m4(op1, vl); +vint32m4_t test_vneg_v_i32m4(vint32m4_t vs, size_t vl) { + return __riscv_vneg_v_i32m4(vs, vl); } -vint32m8_t test_vneg_v_i32m8(vint32m8_t op1, size_t vl) { - return __riscv_vneg_v_i32m8(op1, vl); +vint32m8_t test_vneg_v_i32m8(vint32m8_t vs, size_t vl) { + return __riscv_vneg_v_i32m8(vs, vl); } -vint64m1_t test_vneg_v_i64m1(vint64m1_t op1, size_t vl) { - return __riscv_vneg_v_i64m1(op1, vl); +vint64m1_t test_vneg_v_i64m1(vint64m1_t vs, size_t vl) { + return __riscv_vneg_v_i64m1(vs, vl); } -vint64m2_t test_vneg_v_i64m2(vint64m2_t op1, size_t vl) { - return __riscv_vneg_v_i64m2(op1, vl); +vint64m2_t test_vneg_v_i64m2(vint64m2_t vs, size_t vl) { + return __riscv_vneg_v_i64m2(vs, vl); } -vint64m4_t test_vneg_v_i64m4(vint64m4_t op1, size_t vl) { - return __riscv_vneg_v_i64m4(op1, vl); +vint64m4_t test_vneg_v_i64m4(vint64m4_t vs, size_t vl) { + return __riscv_vneg_v_i64m4(vs, vl); } -vint64m8_t test_vneg_v_i64m8(vint64m8_t op1, size_t vl) { - return __riscv_vneg_v_i64m8(op1, vl); +vint64m8_t test_vneg_v_i64m8(vint64m8_t vs, size_t vl) { + return __riscv_vneg_v_i64m8(vs, vl); } -vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vneg_v_i8mf8_m(mask, op1, vl); +vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t vm, vint8mf8_t vs, size_t vl) { + return __riscv_vneg_v_i8mf8_m(vm, vs, vl); } -vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vneg_v_i8mf4_m(mask, op1, vl); +vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t vm, vint8mf4_t vs, size_t vl) { + return __riscv_vneg_v_i8mf4_m(vm, vs, vl); } -vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vneg_v_i8mf2_m(mask, op1, vl); +vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t vm, vint8mf2_t vs, size_t vl) { + return __riscv_vneg_v_i8mf2_m(vm, vs, vl); } -vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vneg_v_i8m1_m(mask, op1, vl); +vint8m1_t test_vneg_v_i8m1_m(vbool8_t vm, vint8m1_t vs, size_t vl) { + return __riscv_vneg_v_i8m1_m(vm, vs, vl); } -vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return __riscv_vneg_v_i8m2_m(mask, op1, vl); +vint8m2_t test_vneg_v_i8m2_m(vbool4_t vm, vint8m2_t vs, size_t vl) { + return 
__riscv_vneg_v_i8m2_m(vm, vs, vl); } -vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { - return __riscv_vneg_v_i8m4_m(mask, op1, vl); +vint8m4_t test_vneg_v_i8m4_m(vbool2_t vm, vint8m4_t vs, size_t vl) { + return __riscv_vneg_v_i8m4_m(vm, vs, vl); } -vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { - return __riscv_vneg_v_i8m8_m(mask, op1, vl); +vint8m8_t test_vneg_v_i8m8_m(vbool1_t vm, vint8m8_t vs, size_t vl) { + return __riscv_vneg_v_i8m8_m(vm, vs, vl); } -vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return __riscv_vneg_v_i16mf4_m(mask, op1, vl); +vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t vm, vint16mf4_t vs, size_t vl) { + return __riscv_vneg_v_i16mf4_m(vm, vs, vl); } -vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return __riscv_vneg_v_i16mf2_m(mask, op1, vl); +vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t vm, vint16mf2_t vs, size_t vl) { + return __riscv_vneg_v_i16mf2_m(vm, vs, vl); } -vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return __riscv_vneg_v_i16m1_m(mask, op1, vl); +vint16m1_t test_vneg_v_i16m1_m(vbool16_t vm, vint16m1_t vs, size_t vl) { + return __riscv_vneg_v_i16m1_m(vm, vs, vl); } -vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return __riscv_vneg_v_i16m2_m(mask, op1, vl); +vint16m2_t test_vneg_v_i16m2_m(vbool8_t vm, vint16m2_t vs, size_t vl) { + return __riscv_vneg_v_i16m2_m(vm, vs, vl); } -vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { - return __riscv_vneg_v_i16m4_m(mask, op1, vl); +vint16m4_t test_vneg_v_i16m4_m(vbool4_t vm, vint16m4_t vs, size_t vl) { + return __riscv_vneg_v_i16m4_m(vm, vs, vl); } -vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { - return __riscv_vneg_v_i16m8_m(mask, op1, vl); +vint16m8_t test_vneg_v_i16m8_m(vbool2_t vm, vint16m8_t vs, size_t vl) { + return __riscv_vneg_v_i16m8_m(vm, vs, vl); } -vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { - return __riscv_vneg_v_i32mf2_m(mask, op1, vl); +vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t vm, vint32mf2_t vs, size_t vl) { + return __riscv_vneg_v_i32mf2_m(vm, vs, vl); } -vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { - return __riscv_vneg_v_i32m1_m(mask, op1, vl); +vint32m1_t test_vneg_v_i32m1_m(vbool32_t vm, vint32m1_t vs, size_t vl) { + return __riscv_vneg_v_i32m1_m(vm, vs, vl); } -vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { - return __riscv_vneg_v_i32m2_m(mask, op1, vl); +vint32m2_t test_vneg_v_i32m2_m(vbool16_t vm, vint32m2_t vs, size_t vl) { + return __riscv_vneg_v_i32m2_m(vm, vs, vl); } -vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { - return __riscv_vneg_v_i32m4_m(mask, op1, vl); +vint32m4_t test_vneg_v_i32m4_m(vbool8_t vm, vint32m4_t vs, size_t vl) { + return __riscv_vneg_v_i32m4_m(vm, vs, vl); } -vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { - return __riscv_vneg_v_i32m8_m(mask, op1, vl); +vint32m8_t test_vneg_v_i32m8_m(vbool4_t vm, vint32m8_t vs, size_t vl) { + return __riscv_vneg_v_i32m8_m(vm, vs, vl); } -vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { - return __riscv_vneg_v_i64m1_m(mask, op1, vl); +vint64m1_t test_vneg_v_i64m1_m(vbool64_t vm, vint64m1_t vs, size_t vl) { + return __riscv_vneg_v_i64m1_m(vm, vs, vl); } -vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { - 
return __riscv_vneg_v_i64m2_m(mask, op1, vl); +vint64m2_t test_vneg_v_i64m2_m(vbool32_t vm, vint64m2_t vs, size_t vl) { + return __riscv_vneg_v_i64m2_m(vm, vs, vl); } -vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { - return __riscv_vneg_v_i64m4_m(mask, op1, vl); +vint64m4_t test_vneg_v_i64m4_m(vbool16_t vm, vint64m4_t vs, size_t vl) { + return __riscv_vneg_v_i64m4_m(vm, vs, vl); } -vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { - return __riscv_vneg_v_i64m8_m(mask, op1, vl); +vint64m8_t test_vneg_v_i64m8_m(vbool8_t vm, vint64m8_t vs, size_t vl) { + return __riscv_vneg_v_i64m8_m(vm, vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vneg\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-api-tests/vnmsac.c b/auto-generated/gnu-api-tests/vnmsac.c index 5bf88c188..db11ab958 100644 --- a/auto-generated/gnu-api-tests/vnmsac.c +++ b/auto-generated/gnu-api-tests/vnmsac.c @@ -358,356 +358,356 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, return __riscv_vnmsac_vx_u64m8(vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf8_m(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf8_m(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf8_m(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf8_m(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf4_m(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf4_m(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf4_m(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf4_m(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf2_m(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf2_m(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf2_m(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf2_m(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m1_m(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m1_m(vm, vd, vs1, vs2, vl); } -vint8m1_t 
test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m1_m(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m1_m(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m2_m(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m2_m(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m2_m(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m2_m(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m4_m(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m4_m(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m4_m(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m4_m(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m8_m(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m8_m(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m8_m(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m8_m(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf4_m(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf4_m(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16mf4_m(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf4_m(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf2_m(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf2_m(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16mf2_m(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, 
vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf2_m(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m1_m(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m1_m(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m1_m(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m1_m(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m2_m(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m2_m(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m2_m(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m2_m(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m4_m(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m4_m(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m4_m(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m4_m(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m8_m(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m8_m(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m8_m(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m8_m(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32mf2_m(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32mf2_m(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32mf2_m(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32mf2_m(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, 
vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m1_m(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m1_m(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m1_m(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m1_m(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m2_m(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m2_m(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m2_m(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m2_m(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m4_m(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m4_m(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m4_m(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m4_m(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m8_m(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m8_m(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m8_m(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m8_m(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m1_m(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m1_m(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m1_m(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m1_m(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m2_m(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t 
vl) { + return __riscv_vnmsac_vv_i64m2_m(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m2_m(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m2_m(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m4_m(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m4_m(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m4_m(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m4_m(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m8_m(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m8_m(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m8_m(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m8_m(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf8_m(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf8_m(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf8_m(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf8_m(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf4_m(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf4_m(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf4_m(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf4_m(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf2_m(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf2_m(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, 
vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u8mf2_m(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u8mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u8m1_m(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u8m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u8m1_m(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u8m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u8m2_m(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u8m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u8m2_m(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u8m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u8m4_m(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u8m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u8m4_m(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u8m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u8m8_m(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u8m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u8m8_m(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u8m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u16mf4_m(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u16mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u16mf4_m(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u16mf4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u16mf2_m(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u16mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u16mf2_m(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u16mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u16m1_m(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u16m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u16m1_m(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u16m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u16m2_m(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u16m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u16m2_m(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u16m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u16m4_m(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u16m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u16m4_m(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u16m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u16m8_m(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u16m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u16m8_m(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u16m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u32mf2_m(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u32mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u32mf2_m(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u32mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u32m1_m(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u32m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u32m1_m(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u32m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u32m2_m(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u32m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u32m2_m(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u32m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u32m4_m(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u32m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u32m4_m(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u32m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u32m8_m(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u32m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u32m8_m(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u32m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u64m1_m(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u64m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u64m1_m(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u64m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u64m2_m(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u64m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u64m2_m(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u64m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u64m4_m(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u64m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u64m4_m(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u64m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vv_u64m8_m(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vv_u64m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_vx_u64m8_m(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_vx_u64m8_m(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vnmsub.c b/auto-generated/gnu-api-tests/vnmsub.c
index e2e3a64f7..6e862e054 100644
--- a/auto-generated/gnu-api-tests/vnmsub.c
+++ b/auto-generated/gnu-api-tests/vnmsub.c
@@ -358,356 +358,356 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2,
   return __riscv_vnmsub_vx_u64m8(vd, rs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf8_m(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf8_m(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf8_m(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf8_m(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf4_m(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf4_m(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf4_m(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf2_m(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf2_m(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m1_m(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m1_m(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m2_m(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m2_m(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m4_m(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m4_m(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m8_m(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m8_m(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16mf4_m(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16mf4_m(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16mf4_m(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16mf2_m(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16mf2_m(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m1_m(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m1_m(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m2_m(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m2_m(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m4_m(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m4_m(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m8_m(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m8_m(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32mf2_m(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32mf2_m(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m1_m(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m1_m(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m2_m(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m2_m(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m4_m(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m4_m(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m8_m(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m8_m(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m1_m(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m1_m(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m2_m(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m2_m(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m4_m(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m4_m(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m8_m(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m8_m(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf8_m(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf8_m(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf4_m(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf4_m(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf2_m(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf2_m(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m1_m(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m1_m(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m2_m(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m2_m(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m4_m(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m4_m(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m8_m(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m8_m(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16mf4_m(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16mf4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16mf4_m(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16mf4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16mf2_m(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16mf2_m(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m1_m(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m1_m(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m2_m(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m2_m(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m4_m(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m4_m(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m8_m(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m8_m(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32mf2_m(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32mf2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32mf2_m(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32mf2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m1_m(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m1_m(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m2_m(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m2_m(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m4_m(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m4_m(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m8_m(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m8_m(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m8_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m1_m(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m1_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m1_m(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m1_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m2_m(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m2_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m2_m(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m2_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m4_m(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m4_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m4_m(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m4_m(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m8_m(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m8_m(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m8_m(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m8_m(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vnot.c b/auto-generated/gnu-api-tests/vnot.c
index 22cdf7c6d..e8cbd3ce5 100644
--- a/auto-generated/gnu-api-tests/vnot.c
+++ b/auto-generated/gnu-api-tests/vnot.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t op1, size_t vl) {
-  return __riscv_vnot_v_i8mf8(op1, vl);
+vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t vs, size_t vl) {
+  return __riscv_vnot_v_i8mf8(vs, vl);
 }
 
-vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_i8mf4(op1, vl);
+vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_i8mf4(vs, vl);
 }
 
-vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_i8mf2(op1, vl);
+vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_i8mf2(vs, vl);
 }
 
-vint8m1_t test_vnot_v_i8m1(vint8m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m1(op1, vl);
+vint8m1_t test_vnot_v_i8m1(vint8m1_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m1(vs, vl);
 }
 
-vint8m2_t test_vnot_v_i8m2(vint8m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m2(op1, vl);
+vint8m2_t test_vnot_v_i8m2(vint8m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m2(vs, vl);
 }
 
-vint8m4_t test_vnot_v_i8m4(vint8m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m4(op1, vl);
+vint8m4_t test_vnot_v_i8m4(vint8m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m4(vs, vl);
 }
 
-vint8m8_t test_vnot_v_i8m8(vint8m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m8(op1, vl);
+vint8m8_t test_vnot_v_i8m8(vint8m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m8(vs, vl);
 }
 
-vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_i16mf4(op1, vl);
+vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_i16mf4(vs, vl);
 }
 
-vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_i16mf2(op1, vl);
+vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_i16mf2(vs, vl);
 }
 
-vint16m1_t test_vnot_v_i16m1(vint16m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m1(op1, vl);
+vint16m1_t test_vnot_v_i16m1(vint16m1_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m1(vs, vl);
 }
 
-vint16m2_t test_vnot_v_i16m2(vint16m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m2(op1, vl);
+vint16m2_t test_vnot_v_i16m2(vint16m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m2(vs, vl);
 }
 
-vint16m4_t test_vnot_v_i16m4(vint16m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m4(op1, vl);
+vint16m4_t test_vnot_v_i16m4(vint16m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m4(vs, vl);
 }
 
-vint16m8_t test_vnot_v_i16m8(vint16m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m8(op1, vl);
+vint16m8_t test_vnot_v_i16m8(vint16m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m8(vs, vl);
 }
 
-vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_i32mf2(op1, vl);
+vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_i32mf2(vs, vl);
 }
 
-vint32m1_t test_vnot_v_i32m1(vint32m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m1(op1, vl);
+vint32m1_t test_vnot_v_i32m1(vint32m1_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m1(vs, vl);
 }
 
-vint32m2_t test_vnot_v_i32m2(vint32m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m2(op1, vl);
+vint32m2_t test_vnot_v_i32m2(vint32m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m2(vs, vl);
 }
 
-vint32m4_t test_vnot_v_i32m4(vint32m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m4(op1, vl);
+vint32m4_t test_vnot_v_i32m4(vint32m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m4(vs, vl);
 }
 
-vint32m8_t test_vnot_v_i32m8(vint32m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m8(op1, vl);
+vint32m8_t test_vnot_v_i32m8(vint32m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m8(vs, vl);
 }
 
-vint64m1_t test_vnot_v_i64m1(vint64m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m1(op1, vl);
+vint64m1_t test_vnot_v_i64m1(vint64m1_t vs, size_t vl) {
+  return __riscv_vnot_v_i64m1(vs, vl);
 }
 
-vint64m2_t test_vnot_v_i64m2(vint64m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m2(op1, vl);
+vint64m2_t test_vnot_v_i64m2(vint64m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i64m2(vs, vl);
 }
 
-vint64m4_t test_vnot_v_i64m4(vint64m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m4(op1, vl);
+vint64m4_t test_vnot_v_i64m4(vint64m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i64m4(vs, vl);
 }
 
-vint64m8_t test_vnot_v_i64m8(vint64m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m8(op1, vl);
+vint64m8_t test_vnot_v_i64m8(vint64m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i64m8(vs, vl);
 }
 
-vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t op1, size_t vl) {
-  return __riscv_vnot_v_u8mf8(op1, vl);
+vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t vs, size_t vl) {
+  return __riscv_vnot_v_u8mf8(vs, vl);
 }
 
-vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_u8mf4(op1, vl);
+vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_u8mf4(vs, vl);
 }
 
-vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_u8mf2(op1, vl);
+vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_u8mf2(vs, vl);
 }
 
-vuint8m1_t test_vnot_v_u8m1(vuint8m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m1(op1, vl);
+vuint8m1_t test_vnot_v_u8m1(vuint8m1_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m1(vs, vl);
 }
 
-vuint8m2_t test_vnot_v_u8m2(vuint8m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m2(op1, vl);
+vuint8m2_t test_vnot_v_u8m2(vuint8m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m2(vs, vl);
 }
 
-vuint8m4_t test_vnot_v_u8m4(vuint8m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m4(op1, vl);
+vuint8m4_t test_vnot_v_u8m4(vuint8m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m4(vs, vl);
 }
 
-vuint8m8_t test_vnot_v_u8m8(vuint8m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m8(op1, vl);
+vuint8m8_t test_vnot_v_u8m8(vuint8m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m8(vs, vl);
 }
 
-vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_u16mf4(op1, vl);
+vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_u16mf4(vs, vl);
 }
 
-vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_u16mf2(op1, vl);
+vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_u16mf2(vs, vl);
 }
 
-vuint16m1_t test_vnot_v_u16m1(vuint16m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m1(op1, vl);
+vuint16m1_t test_vnot_v_u16m1(vuint16m1_t vs, size_t vl) {
+  return __riscv_vnot_v_u16m1(vs, vl);
 }
 
-vuint16m2_t test_vnot_v_u16m2(vuint16m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m2(op1, vl);
+vuint16m2_t test_vnot_v_u16m2(vuint16m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u16m2(vs, vl);
 }
 
-vuint16m4_t test_vnot_v_u16m4(vuint16m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m4(op1, vl);
+vuint16m4_t test_vnot_v_u16m4(vuint16m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u16m4(vs, vl);
 }
 
-vuint16m8_t test_vnot_v_u16m8(vuint16m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m8(op1, vl);
+vuint16m8_t test_vnot_v_u16m8(vuint16m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u16m8(vs, vl);
 }
 
-vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_u32mf2(op1, vl);
+vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_u32mf2(vs, vl);
 }
 
-vuint32m1_t test_vnot_v_u32m1(vuint32m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m1(op1, vl);
+vuint32m1_t test_vnot_v_u32m1(vuint32m1_t vs, size_t vl) {
  + return __riscv_vnot_v_u32m1(vs, vl);
 }
 
-vuint32m2_t test_vnot_v_u32m2(vuint32m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m2(op1, vl);
+vuint32m2_t test_vnot_v_u32m2(vuint32m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u32m2(vs, vl);
 }
 
-vuint32m4_t test_vnot_v_u32m4(vuint32m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m4(op1, vl);
+vuint32m4_t test_vnot_v_u32m4(vuint32m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u32m4(vs, vl);
 }
 
-vuint32m8_t test_vnot_v_u32m8(vuint32m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m8(op1, vl);
+vuint32m8_t test_vnot_v_u32m8(vuint32m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u32m8(vs, vl);
 }
 
-vuint64m1_t test_vnot_v_u64m1(vuint64m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m1(op1, vl);
+vuint64m1_t test_vnot_v_u64m1(vuint64m1_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m1(vs, vl);
 }
 
-vuint64m2_t test_vnot_v_u64m2(vuint64m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m2(op1, vl);
+vuint64m2_t test_vnot_v_u64m2(vuint64m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m2(vs, vl);
 }
 
-vuint64m4_t test_vnot_v_u64m4(vuint64m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m4(op1, vl);
+vuint64m4_t test_vnot_v_u64m4(vuint64m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m4(vs, vl);
 }
 
-vuint64m8_t test_vnot_v_u64m8(vuint64m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m8(op1, vl);
+vuint64m8_t test_vnot_v_u64m8(vuint64m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m8(vs, vl);
 }
 
-vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
-  return __riscv_vnot_v_i8mf8_m(mask, op1, vl);
+vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t vm, vint8mf8_t vs, size_t vl) {
+  return __riscv_vnot_v_i8mf8_m(vm, vs, vl);
 }
 
-vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_i8mf4_m(mask, op1, vl);
+vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t vm, vint8mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_i8mf4_m(vm, vs, vl);
 }
 
-vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_i8mf2_m(mask, op1, vl);
+vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t vm, vint8mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_i8mf2_m(vm, vs, vl);
 }
 
-vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m1_m(mask, op1, vl);
+vint8m1_t test_vnot_v_i8m1_m(vbool8_t vm, vint8m1_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m1_m(vm, vs, vl);
 }
 
-vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m2_m(mask, op1, vl);
+vint8m2_t test_vnot_v_i8m2_m(vbool4_t vm, vint8m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m2_m(vm, vs, vl);
 }
 
-vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m4_m(mask, op1, vl);
+vint8m4_t test_vnot_v_i8m4_m(vbool2_t vm, vint8m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m4_m(vm, vs, vl);
 }
 
-vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i8m8_m(mask, op1, vl);
+vint8m8_t test_vnot_v_i8m8_m(vbool1_t vm, vint8m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i8m8_m(vm, vs, vl);
 }
 
-vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_i16mf4_m(mask, op1, vl);
+vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t vm, vint16mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_i16mf4_m(vm, vs, vl);
 }
 
-vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_i16mf2_m(mask, op1, vl);
+vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t vm, vint16mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_i16mf2_m(vm, vs, vl);
 }
 
-vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m1_m(mask, op1, vl);
+vint16m1_t test_vnot_v_i16m1_m(vbool16_t vm, vint16m1_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m1_m(vm, vs, vl);
 }
 
-vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m2_m(mask, op1, vl);
+vint16m2_t test_vnot_v_i16m2_m(vbool8_t vm, vint16m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m2_m(vm, vs, vl);
 }
 
-vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m4_m(mask, op1, vl);
+vint16m4_t test_vnot_v_i16m4_m(vbool4_t vm, vint16m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m4_m(vm, vs, vl);
 }
 
-vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i16m8_m(mask, op1, vl);
+vint16m8_t test_vnot_v_i16m8_m(vbool2_t vm, vint16m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i16m8_m(vm, vs, vl);
 }
 
-vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_i32mf2_m(mask, op1, vl);
+vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t vm, vint32mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_i32mf2_m(vm, vs, vl);
 }
 
-vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m1_m(mask, op1, vl);
+vint32m1_t test_vnot_v_i32m1_m(vbool32_t vm, vint32m1_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m1_m(vm, vs, vl);
 }
 
-vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m2_m(mask, op1, vl);
+vint32m2_t test_vnot_v_i32m2_m(vbool16_t vm, vint32m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m2_m(vm, vs, vl);
 }
 
-vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m4_m(mask, op1, vl);
+vint32m4_t test_vnot_v_i32m4_m(vbool8_t vm, vint32m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m4_m(vm, vs, vl);
 }
 
-vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i32m8_m(mask, op1, vl);
+vint32m8_t test_vnot_v_i32m8_m(vbool4_t vm, vint32m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i32m8_m(vm, vs, vl);
 }
 
-vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m1_m(mask, op1, vl);
+vint64m1_t test_vnot_v_i64m1_m(vbool64_t vm, vint64m1_t vs, size_t vl) {
  + return __riscv_vnot_v_i64m1_m(vm, vs, vl);
 }
 
-vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m2_m(mask, op1, vl);
+vint64m2_t test_vnot_v_i64m2_m(vbool32_t vm, vint64m2_t vs, size_t vl) {
+  return __riscv_vnot_v_i64m2_m(vm, vs, vl);
 }
 
-vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m4_m(mask, op1, vl);
+vint64m4_t test_vnot_v_i64m4_m(vbool16_t vm, vint64m4_t vs, size_t vl) {
+  return __riscv_vnot_v_i64m4_m(vm, vs, vl);
 }
 
-vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) {
-  return __riscv_vnot_v_i64m8_m(mask, op1, vl);
+vint64m8_t test_vnot_v_i64m8_m(vbool8_t vm, vint64m8_t vs, size_t vl) {
+  return __riscv_vnot_v_i64m8_m(vm, vs, vl);
 }
 
-vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
-  return __riscv_vnot_v_u8mf8_m(mask, op1, vl);
+vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs, size_t vl) {
+  return __riscv_vnot_v_u8mf8_m(vm, vs, vl);
 }
 
-vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_u8mf4_m(mask, op1, vl);
+vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_u8mf4_m(vm, vs, vl);
 }
 
-vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_u8mf2_m(mask, op1, vl);
+vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_u8mf2_m(vm, vs, vl);
 }
 
-vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m1_m(mask, op1, vl);
+vuint8m1_t test_vnot_v_u8m1_m(vbool8_t vm, vuint8m1_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m1_m(vm, vs, vl);
 }
 
-vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m2_m(mask, op1, vl);
+vuint8m2_t test_vnot_v_u8m2_m(vbool4_t vm, vuint8m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m2_m(vm, vs, vl);
 }
 
-vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m4_m(mask, op1, vl);
+vuint8m4_t test_vnot_v_u8m4_m(vbool2_t vm, vuint8m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m4_m(vm, vs, vl);
 }
 
-vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u8m8_m(mask, op1, vl);
+vuint8m8_t test_vnot_v_u8m8_m(vbool1_t vm, vuint8m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u8m8_m(vm, vs, vl);
 }
 
-vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
-  return __riscv_vnot_v_u16mf4_m(mask, op1, vl);
+vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs, size_t vl) {
+  return __riscv_vnot_v_u16mf4_m(vm, vs, vl);
 }
 
-vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_u16mf2_m(mask, op1, vl);
+vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_u16mf2_m(vm, vs, vl);
 }
 
-vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m1_m(mask, op1, vl);
+vuint16m1_t test_vnot_v_u16m1_m(vbool16_t vm, vuint16m1_t vs, size_t vl) {
  + return __riscv_vnot_v_u16m1_m(vm, vs, vl);
 }
 
-vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m2_m(mask, op1, vl);
+vuint16m2_t test_vnot_v_u16m2_m(vbool8_t vm, vuint16m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u16m2_m(vm, vs, vl);
 }
 
-vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m4_m(mask, op1, vl);
+vuint16m4_t test_vnot_v_u16m4_m(vbool4_t vm, vuint16m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u16m4_m(vm, vs, vl);
 }
 
-vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u16m8_m(mask, op1, vl);
+vuint16m8_t test_vnot_v_u16m8_m(vbool2_t vm, vuint16m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u16m8_m(vm, vs, vl);
 }
 
-vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
-  return __riscv_vnot_v_u32mf2_m(mask, op1, vl);
+vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs, size_t vl) {
+  return __riscv_vnot_v_u32mf2_m(vm, vs, vl);
 }
 
-vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m1_m(mask, op1, vl);
+vuint32m1_t test_vnot_v_u32m1_m(vbool32_t vm, vuint32m1_t vs, size_t vl) {
+  return __riscv_vnot_v_u32m1_m(vm, vs, vl);
 }
 
-vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m2_m(mask, op1, vl);
+vuint32m2_t test_vnot_v_u32m2_m(vbool16_t vm, vuint32m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u32m2_m(vm, vs, vl);
 }
 
-vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m4_m(mask, op1, vl);
+vuint32m4_t test_vnot_v_u32m4_m(vbool8_t vm, vuint32m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u32m4_m(vm, vs, vl);
 }
 
-vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u32m8_m(mask, op1, vl);
+vuint32m8_t test_vnot_v_u32m8_m(vbool4_t vm, vuint32m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u32m8_m(vm, vs, vl);
 }
 
-vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m1_m(mask, op1, vl);
+vuint64m1_t test_vnot_v_u64m1_m(vbool64_t vm, vuint64m1_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m1_m(vm, vs, vl);
 }
 
-vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m2_m(mask, op1, vl);
+vuint64m2_t test_vnot_v_u64m2_m(vbool32_t vm, vuint64m2_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m2_m(vm, vs, vl);
 }
 
-vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m4_m(mask, op1, vl);
+vuint64m4_t test_vnot_v_u64m4_m(vbool16_t vm, vuint64m4_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m4_m(vm, vs, vl);
 }
 
-vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t vl) {
-  return __riscv_vnot_v_u64m8_m(mask, op1, vl);
+vuint64m8_t test_vnot_v_u64m8_m(vbool8_t vm, vuint64m8_t vs, size_t vl) {
+  return __riscv_vnot_v_u64m8_m(vm, vs, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnot\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vnsra.c b/auto-generated/gnu-api-tests/vnsra.c
index 57bf2e656..14c1ddf0d 100644
--- a/auto-generated/gnu-api-tests/vnsra.c
+++ b/auto-generated/gnu-api-tests/vnsra.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8mf8(op1, shift, vl);
+vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8mf8(vs2, vs1, vl);
 }
 
-vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8mf8(op1, shift, vl);
+vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8mf8(vs2, rs1, vl);
 }
 
-vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8mf4(op1, shift, vl);
+vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8mf4(vs2, vs1, vl);
 }
 
-vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8mf4(op1, shift, vl);
+vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8mf4(vs2, rs1, vl);
 }
 
-vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8mf2(op1, shift, vl);
+vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8mf2(vs2, vs1, vl);
 }
 
-vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8mf2(op1, shift, vl);
+vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8mf2(vs2, rs1, vl);
 }
 
-vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8m1(op1, shift, vl);
+vint8m1_t test_vnsra_wv_i8m1(vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8m1(op1, shift, vl);
+vint8m1_t test_vnsra_wx_i8m1(vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8m1(vs2, rs1, vl);
 }
 
-vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8m2(op1, shift, vl);
+vint8m2_t test_vnsra_wv_i8m2(vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8m2(vs2, vs1, vl);
 }
 
-vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8m2(op1, shift, vl);
+vint8m2_t test_vnsra_wx_i8m2(vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8m2(vs2, rs1, vl);
 }
 
-vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8m4(op1, shift, vl);
+vint8m4_t test_vnsra_wv_i8m4(vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8m4(vs2, vs1, vl);
 }
 
-vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8m4(op1, shift, vl);
+vint8m4_t test_vnsra_wx_i8m4(vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8m4(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i16mf4(op1, shift, vl);
+vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i16mf4(vs2, vs1, vl);
 }
 
-vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i16mf4(op1, shift, vl);
+vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i16mf4(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i16mf2(op1, shift, vl);
+vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i16mf2(vs2, vs1, vl);
 }
 
-vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i16mf2(op1, shift, vl);
+vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i16mf2(vs2, rs1, vl);
 }
 
-vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i16m1(op1, shift, vl);
+vint16m1_t test_vnsra_wv_i16m1(vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i16m1(op1, shift, vl);
+vint16m1_t test_vnsra_wx_i16m1(vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i16m1(vs2, rs1, vl);
 }
 
-vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i16m2(op1, shift, vl);
+vint16m2_t test_vnsra_wv_i16m2(vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i16m2(vs2, vs1, vl);
 }
 
-vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i16m2(op1, shift, vl);
+vint16m2_t test_vnsra_wx_i16m2(vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i16m2(vs2, rs1, vl);
 }
 
-vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i16m4(op1, shift, vl);
+vint16m4_t test_vnsra_wv_i16m4(vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i16m4(vs2, vs1, vl);
 }
 
-vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i16m4(op1, shift, vl);
+vint16m4_t test_vnsra_wx_i16m4(vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i16m4(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i32mf2(op1, shift, vl);
+vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i32mf2(vs2, vs1, vl);
 }
 
-vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i32mf2(op1, shift, vl);
+vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i32mf2(vs2, rs1, vl);
 }
 
-vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i32m1(op1, shift, vl);
+vint32m1_t test_vnsra_wv_i32m1(vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i32m1(op1, shift, vl);
+vint32m1_t test_vnsra_wx_i32m1(vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i32m1(vs2, rs1, vl);
 }
 
-vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i32m2(op1, shift, vl);
+vint32m2_t test_vnsra_wv_i32m2(vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i32m2(vs2, vs1, vl);
 }
 
-vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i32m2(op1, shift, vl);
+vint32m2_t test_vnsra_wx_i32m2(vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i32m2(vs2, rs1, vl);
 }
 
-vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i32m4(op1, shift, vl);
+vint32m4_t test_vnsra_wv_i32m4(vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i32m4(vs2, vs1, vl);
 }
 
-vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i32m4(op1, shift, vl);
+vint32m4_t test_vnsra_wx_i32m4(vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i32m4(vs2, rs1, vl);
 }
 
-vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8mf8_m(mask, op1, shift, vl);
+vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8mf8_m(vm, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8mf8_m(mask, op1, shift, vl);
+vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8mf8_m(vm, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8mf4_m(mask, op1, shift, vl);
+vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8mf4_m(vm, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8mf4_m(mask, op1, shift, vl);
+vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8mf4_m(vm, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8mf2_m(mask, op1, shift, vl);
+vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t vm, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8mf2_m(vm, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8mf2_m(mask, op1, shift, vl);
+vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8mf2_m(vm, vs2, rs1, vl);
 }
 
-vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8m1_m(mask, op1, shift, vl);
+vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t vm, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8m1_m(mask, op1, shift, vl);
+vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra_wx_i8m1_m(vm, vs2, rs1, vl);
 }
 
-vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsra_wv_i8m2_m(mask, op1, shift, vl);
+vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t vm, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsra_wv_i8m2_m(vm, vs2, vs1, vl);
 }
 
-vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra_wx_i8m2_m(mask, op1, shift, vl);
+vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return
__riscv_vnsra_wx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m4_m(mask, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t vm, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m4_m(mask, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m4_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf4_m(mask, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf4_m(mask, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf2_m(mask, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t vm, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf2_m(mask, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m1_m(mask, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t vm, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m1_m(mask, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m2_m(mask, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t vm, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m2_m(mask, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m4_m(mask, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t vm, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m4_m(mask, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t vm, vint32m8_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m4_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32mf2_m(mask, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t vm, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32mf2_m(mask, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m1_m(mask, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t vm, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m1_m(mask, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m2_m(mask, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t vm, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m2_m(mask, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m4_m(mask, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t vm, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m4_m(mask, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m4_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsra\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vnsrl.c b/auto-generated/gnu-api-tests/vnsrl.c index 0480b73c6..d43afc9ed 100644 --- a/auto-generated/gnu-api-tests/vnsrl.c +++ b/auto-generated/gnu-api-tests/vnsrl.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf8(op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf8(op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return 
__riscv_vnsrl_wv_u8mf4(op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf4(op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf2(op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf2(op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m1(op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m1(op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m2(op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m2(op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m4(op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m4(op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m4(vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf4(op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf4(op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf2(op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf2(op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { 
- return __riscv_vnsrl_wv_u16m1(op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m1(op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m2(op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m2(op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m4(op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m4(op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m4(vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32mf2(op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32mf2(op1, shift, vl); +vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m1(op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m1(op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m2(op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m2(op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m4(op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m4(op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m4(vs2, rs1, vl); } -vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, 
vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf8_m(mask, op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf8_m(mask, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf4_m(mask, op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf4_m(mask, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf2_m(mask, op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf2_m(mask, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m1_m(mask, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m1_m(mask, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m2_m(mask, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m2_m(mask, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m4_m(mask, op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m4_m(mask, op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m4_m(vm, vs2, rs1, vl); } -vuint16mf4_t 
test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf4_m(mask, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf4_m(mask, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf2_m(mask, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf2_m(mask, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m1_m(mask, op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m1_m(mask, op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m2_m(mask, op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m2_m(mask, op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m4_m(mask, op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m4_m(mask, op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m4_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32mf2_m(mask, op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32mf2_m(mask, op1, shift, vl); +vuint32mf2_t 
test_vnsrl_wx_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m1_m(mask, op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m1_m(mask, op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m2_m(mask, op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m2_m(mask, op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m4_m(mask, op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m4_m(mask, op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m4_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsrl\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vor.c b/auto-generated/gnu-api-tests/vor.c index fbf29d733..9d8298ae5 100644 --- a/auto-generated/gnu-api-tests/vor.c +++ b/auto-generated/gnu-api-tests/vor.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + 
return __riscv_vor_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vor_vv_i8m1(op1, op2, vl); +vint8m1_t test_vor_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m1(op1, op2, vl); +vint8m1_t test_vor_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_vv_i8m2(op1, op2, vl); +vint8m2_t test_vor_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m2(op1, op2, vl); +vint8m2_t test_vor_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vor_vv_i8m4(op1, op2, vl); +vint8m4_t test_vor_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m4(op1, op2, vl); +vint8m4_t test_vor_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_vv_i8m8(op1, op2, vl); +vint8m8_t test_vor_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m8(op1, op2, vl); +vint8m8_t test_vor_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_vv_i16m1(op1, op2, vl); +vint16m1_t test_vor_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m1(op1, op2, vl); +vint16m1_t test_vor_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return 
__riscv_vor_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_vv_i16m2(op1, op2, vl); +vint16m2_t test_vor_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m2(op1, op2, vl); +vint16m2_t test_vor_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_vv_i16m4(op1, op2, vl); +vint16m4_t test_vor_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m4(op1, op2, vl); +vint16m4_t test_vor_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vor_vv_i16m8(op1, op2, vl); +vint16m8_t test_vor_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m8(op1, op2, vl); +vint16m8_t test_vor_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_vv_i32m1(op1, op2, vl); +vint32m1_t test_vor_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m1(op1, op2, vl); +vint32m1_t test_vor_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_vv_i32m2(op1, op2, vl); +vint32m2_t test_vor_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m2(op1, op2, vl); +vint32m2_t test_vor_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_vv_i32m4(op1, op2, vl); +vint32m4_t test_vor_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m4(op1, op2, vl); +vint32m4_t test_vor_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_vv_i32m8(op1, op2, vl); +vint32m8_t 
test_vor_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m8(op1, op2, vl); +vint32m8_t test_vor_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_vv_i64m1(op1, op2, vl); +vint64m1_t test_vor_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m1(op1, op2, vl); +vint64m1_t test_vor_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_vv_i64m2(op1, op2, vl); +vint64m2_t test_vor_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m2(op1, op2, vl); +vint64m2_t test_vor_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_vv_i64m4(op1, op2, vl); +vint64m4_t test_vor_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m4(op1, op2, vl); +vint64m4_t test_vor_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_vv_i64m8(op1, op2, vl); +vint64m8_t test_vor_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m8(op1, op2, vl); +vint64m8_t test_vor_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_u8mf8(op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf8(op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_u8mf4(op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf4(op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_u8mf2(op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) 
{ - return __riscv_vor_vx_u8mf2(op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_vv_u8m1(op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m1(op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_vv_u8m2(op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m2(op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_vv_u8m4(op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m4(op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_vv_u8m8(op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m1(vs2, rs1, vl); } 
-vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return 
__riscv_vor_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m8(vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, 
vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vor_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vor_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, 
size_t vl) { + return __riscv_vor_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vor_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t 
test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m4_m(mask, 
op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m8_m(vm, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + 
return __riscv_vor_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m2_m(vm, vs2, rs1, vl); } 
-vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t 
test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vor\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-api-tests/vredand.c b/auto-generated/gnu-api-tests/vredand.c index c486a811c..c93462037 100644 --- a/auto-generated/gnu-api-tests/vredand.c +++ b/auto-generated/gnu-api-tests/vredand.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf8_i8m1(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return 
__riscv_vredand_vs_i8mf8_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf4_i8m1(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf2_i8m1(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m1_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m8_i8m1(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf4_i16m1(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf2_i16m1(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m1_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1(vs2, vs1, vl); } -vint32m1_t 
test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32mf2_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32mf2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m1_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m8_i32m1(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf8_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf4_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf2_u8m1(vs2, vs1, vl); } -vuint8m1_t 
test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf4_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf2_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m2_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32mf2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32mf2_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1(vs2, vs1, vl); } 
-vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf8_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf4_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf2_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m1_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return 
__riscv_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m2_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m4_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m8_i8m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf4_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf2_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m1_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m2_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m4_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32mf2_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + 
return __riscv_vredand_vs_i32m1_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m8_i32m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf8_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf4_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf2_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t 
scalar, size_t vl) { - return __riscv_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf4_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf2_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m2_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t 
test_vredand_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32mf2_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1_m(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredand\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vredmax.c b/auto-generated/gnu-api-tests/vredmax.c index b88dd6970..e0537b5be 100644 --- a/auto-generated/gnu-api-tests/vredmax.c +++ b/auto-generated/gnu-api-tests/vredmax.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf8_i8m1(vector, scalar, vl); 
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf8_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf4_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf2_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m1_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf4_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf2_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m1_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + 
return __riscv_vredmax_vs_i16m8_i16m1(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32mf2_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32mf2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m4_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf8_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf4_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); 
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf2_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m1_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m4_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf4_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf2_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m1_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m8_i16m1_m(vm, vs2, vs1, vl); } -vint32m1_t 
test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32mf2_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m4_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1_m(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmax\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-api-tests/vredmaxu.c b/auto-generated/gnu-api-tests/vredmaxu.c index 47f6b00e2..39a0f0e61 100644 --- a/auto-generated/gnu-api-tests/vredmaxu.c +++ b/auto-generated/gnu-api-tests/vredmaxu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8m1_t 
-  return __riscv_vredmaxu_vs_u8mf8_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf8_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8mf4_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf4_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8mf2_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf2_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m1_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m2_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m4_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m8_u8m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16mf4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16mf4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16mf2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16mf2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m1_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m8_u16m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32mf2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32mf2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m1_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m4_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m8_u32m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m1_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m2_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m4_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m8_u64m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf8_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf4_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf2_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m1_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m2_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m4_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u8m8_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16mf4_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16mf2_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m1_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m2_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m4_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u16m8_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32mf2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32mf2_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m1_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m2_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
  + return __riscv_vredmaxu_vs_u32m4_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m8_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m1_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m2_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
  + return __riscv_vredmaxu_vs_u64m4_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m8_u64m1_m(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmaxu\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/gnu-api-tests/vredmin.c b/auto-generated/gnu-api-tests/vredmin.c
index 58598d6d9..1463c4526 100644
--- a/auto-generated/gnu-api-tests/vredmin.c
+++ b/auto-generated/gnu-api-tests/vredmin.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8mf8_i8m1(vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8mf8_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8mf4_i8m1(vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8mf4_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8mf2_i8m1(vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8mf2_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m1_i8m1(vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m1_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m2_i8m1(vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m2_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m4_i8m1(vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m4_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m8_i8m1(vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m8_i8m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16mf4_i16m1(vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16mf4_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16mf2_i16m1(vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16mf2_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m1_i16m1(vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16m1_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m2_i16m1(vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16m2_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m4_i16m1(vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16m4_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m8_i16m1(vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16m8_i16m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32mf2_i32m1(vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i32mf2_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m1_i32m1(vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i32m1_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m2_i32m1(vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i32m2_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m4_i32m1(vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i32m4_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m8_i32m1(vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i32m8_i32m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m1_i64m1(vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i64m1_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m2_i64m1(vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i64m2_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m4_i64m1(vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i64m4_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m8_i64m1(vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i64m8_i64m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8mf8_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8mf8_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8mf4_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8mf4_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8mf2_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8mf2_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m1_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m2_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m4_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i8m8_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16mf4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16mf4_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16mf2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i16mf2_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16m1_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredmin_vs_i16m2_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i16m4_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i16m8_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32mf2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i32mf2_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i32m1_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i32m2_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i32m4_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i32m8_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i64m1_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i64m2_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i64m4_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
  + return __riscv_vredmin_vs_i64m8_i64m1_m(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmin\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/gnu-api-tests/vredminu.c b/auto-generated/gnu-api-tests/vredminu.c
index 9c4f356d7..6fbe1c8c2 100644
--- a/auto-generated/gnu-api-tests/vredminu.c
+++ b/auto-generated/gnu-api-tests/vredminu.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8mf8_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8mf8_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8mf4_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8mf4_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8mf2_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8mf2_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m1_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m1_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m2_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m2_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m4_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m4_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m8_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m8_u8m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16mf4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u16mf4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16mf2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u16mf2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m1_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u16m1_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u16m2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u16m4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m8_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u16m8_u16m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32mf2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u32mf2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m1_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u32m1_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u32m2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m4_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u32m4_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m8_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u32m8_u32m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m1_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u64m1_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m2_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u64m2_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m4_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u64m4_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u64m8_u64m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8mf8_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8mf8_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8mf4_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8mf4_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8mf2_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8mf2_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m1_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m2_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m4_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u8m8_u8m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16mf4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u16mf4_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16mf2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u16mf2_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredminu_vs_u16m1_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u16m2_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u16m4_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u16m8_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32mf2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u32mf2_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u32m1_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u32m2_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u32m4_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u32m8_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u64m1_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u64m2_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u64m4_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
  + return __riscv_vredminu_vs_u64m8_u64m1_m(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredminu\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/gnu-api-tests/vredor.c b/auto-generated/gnu-api-tests/vredor.c
index 018675c85..4ea9961da 100644
--- a/auto-generated/gnu-api-tests/vredor.c
+++ b/auto-generated/gnu-api-tests/vredor.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8mf8_i8m1(vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8mf8_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8mf4_i8m1(vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8mf4_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8mf2_i8m1(vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8mf2_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m1_i8m1(vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8m1_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m2_i8m1(vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8m2_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m4_i8m1(vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8m4_i8m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m8_i8m1(vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8m8_i8m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16mf4_i16m1(vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i16mf4_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16mf2_i16m1(vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i16mf2_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m1_i16m1(vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i16m1_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m2_i16m1(vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i16m2_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m4_i16m1(vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i16m4_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m8_i16m1(vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i16m8_i16m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32mf2_i32m1(vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i32mf2_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m1_i32m1(vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i32m1_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m2_i32m1(vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i32m2_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m4_i32m1(vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i32m4_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m8_i32m1(vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i32m8_i32m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i64m1_i64m1(vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i64m1_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i64m2_i64m1(vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i64m2_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i64m4_i64m1(vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i64m4_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i64m8_i64m1(vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i64m8_i64m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u8mf8_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u8mf8_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u8mf4_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u8mf4_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u8mf2_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u8mf2_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u8m1_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u8m1_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u8m2_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u8m2_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u8m4_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u8m4_u8m1(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u8m8_u8m1(vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u8m8_u8m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u16mf4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u16mf4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u16mf2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u16mf2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u16m1_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u16m1_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u16m2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u16m2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u16m4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u16m4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u16m8_u16m1(vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u16m8_u16m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u32mf2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u32mf2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u32m1_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u32m1_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u32m2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u32m2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u32m4_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u32m4_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u32m8_u32m1(vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u32m8_u32m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u64m1_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u64m1_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u64m2_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u64m2_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u64m4_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u64m4_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_u64m8_u64m1(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8mf8_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8mf4_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8mf2_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i8m1_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i8m2_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i8m4_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i8m8_i8m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i16mf4_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i16mf2_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i16m1_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredor_vs_i16m2_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i16m4_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i16m8_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i32mf2_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i32m1_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i32m2_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i32m4_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i32m8_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i64m1_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i64m2_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
  + return __riscv_vredor_vs_i64m4_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t
scalar, size_t vl) { - return __riscv_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m8_i64m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf8_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf4_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf2_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m1_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m2_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m4_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m8_u8m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16mf4_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16mf2_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + 
return __riscv_vredor_vs_u16m1_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m2_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m4_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m8_u16m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32mf2_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m1_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m2_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m4_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m8_u32m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m1_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m2_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, 
vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m4_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m8_u64m1_m(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredor\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vredsum.c b/auto-generated/gnu-api-tests/vredsum.c index 6993c8d24..77708043b 100644 --- a/auto-generated/gnu-api-tests/vredsum.c +++ b/auto-generated/gnu-api-tests/vredsum.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf8_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf8_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf4_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf2_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m1_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m8_i8m1(vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16mf4_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16mf4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16mf2_i16m1(vector, scalar, vl); +vint16m1_t 
test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16mf2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m1_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m8_i16m1(vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32mf2_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32mf2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m1_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m4_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m8_i32m1(vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m1_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m2_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t 
test_vredsum_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m4_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m8_i64m1(vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8mf8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8mf8_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8mf4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8mf4_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8mf2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8mf2_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m1_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m2_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m4_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m8_u8m1(vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16mf4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16mf4_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16mf2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16mf2_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16m1_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t 
test_vredsum_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16m2_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16m4_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16m8_u16m1(vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32mf2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32mf2_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m1_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m2_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m4_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m8_u32m1(vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m1_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m2_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m4_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m8_u64m1(vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return 
__riscv_vredsum_vs_i8mf8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf8_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf4_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf2_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m1_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m2_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m4_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m8_i8m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16mf4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16mf4_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16mf2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16mf2_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m1_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m2_i16m1_m(vm, 
vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m4_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i16m8_i16m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32mf2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32mf2_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m1_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m2_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m4_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i32m8_i32m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m1_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m2_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m4_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return 
__riscv_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i64m8_i64m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8mf8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8mf8_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8mf4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8mf4_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8mf2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8mf2_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m1_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m2_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m4_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u8m8_u8m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16mf4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16mf4_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16mf2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16mf2_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, 
size_t vl) { + return __riscv_vredsum_vs_u16m1_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16m2_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16m4_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u16m8_u16m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32mf2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32mf2_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m1_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m2_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m4_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u32m8_u32m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m1_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m2_u64m1_m(vm, vs2, vs1, vl); } 
-vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m4_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_u64m8_u64m1_m(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredsum\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vredxor.c b/auto-generated/gnu-api-tests/vredxor.c index adfde2952..96edd7eea 100644 --- a/auto-generated/gnu-api-tests/vredxor.c +++ b/auto-generated/gnu-api-tests/vredxor.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8mf8_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i8mf8_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8mf4_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i8mf4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8mf2_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i8mf2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i8m1_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i8m2_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i8m4_i8m1(vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i8m8_i8m1(vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16mf4_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i16mf4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return 
__riscv_vredxor_vs_i16mf2_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i16mf2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i16m1_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i16m2_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i16m4_i16m1(vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i16m8_i16m1(vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32mf2_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i32mf2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i32m1_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i32m2_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i32m4_i32m1(vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i32m8_i32m1(vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i64m1_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i64m2_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return 
__riscv_vredxor_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i64m4_i64m1(vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_i64m8_i64m1(vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8mf8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u8mf8_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8mf4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u8mf4_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8mf2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u8mf2_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u8m1_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u8m2_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u8m4_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u8m8_u8m1(vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16mf4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u16mf4_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16mf2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u16mf2_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u16m1_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return 
__riscv_vredxor_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u16m2_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u16m4_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u16m8_u16m1(vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32mf2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u32mf2_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u32m1_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u32m2_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u32m4_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u32m8_u32m1(vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u64m1_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u64m2_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u64m4_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredxor_vs_u64m8_u64m1(vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t 
mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf8_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf8_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf4_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf4_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf2_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf2_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m1_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m2_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m4_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m8_i8m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16mf4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16mf4_i16m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16mf2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16mf2_i16m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m1_i16m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m2_i16m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m4_i16m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m8_i16m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32mf2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32mf2_i32m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m1_i32m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m2_i32m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m4_i32m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m8_i32m1_m(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m1_i64m1_m(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m2_i64m1_m(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m4_i64m1_m(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m8_i64m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf8_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf8_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf4_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf4_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf2_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf2_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m1_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m2_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m4_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m8_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16mf4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16mf4_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16mf2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16mf2_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m1_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m2_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m4_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m8_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32mf2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32mf2_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m1_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m2_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m4_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m8_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m1_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m2_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m4_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m8_u64m1_m(vm, vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredxor\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vrem.c b/auto-generated/gnu-api-tests/vrem.c
index 4b6a94bc5..644f4d2d5 100644
--- a/auto-generated/gnu-api-tests/vrem.c
+++ b/auto-generated/gnu-api-tests/vrem.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf8(op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf8(vs2, vs1, vl);
 }

-vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf8(op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf8(vs2, rs1, vl);
 }

-vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf4(op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf4(vs2, vs1, vl);
 }

-vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf4(op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf4(vs2, rs1, vl);
 }

-vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf2(op1, op2, vl);
+vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf2(vs2, vs1, vl);
 }

-vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf2(op1, op2, vl);
+vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf2(vs2, rs1, vl);
 }

-vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m1(op1, op2, vl);
+vint8m1_t test_vrem_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m1(vs2, vs1, vl);
 }

-vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m1(op1, op2, vl);
+vint8m1_t test_vrem_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m1(vs2, rs1, vl);
 }

-vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m2(op1, op2, vl);
+vint8m2_t test_vrem_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m2(vs2, vs1, vl);
 }

-vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m2(op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m2(vs2, rs1, vl);
 }

-vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m4(op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m4(vs2, vs1, vl);
 }

-vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m4(op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m4(vs2, rs1, vl);
 }

-vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m8(op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m8(vs2, vs1, vl);
 }

-vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m8(op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m8(vs2, rs1, vl);
 }

-vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16mf4(op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16mf4(vs2, vs1, vl);
 }

-vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16mf4(op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16mf4(vs2, rs1, vl);
 }

-vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16mf2(op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16mf2(vs2, vs1, vl);
 }

-vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16mf2(op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16mf2(vs2, rs1, vl);
 }

-vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m1(op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m1(vs2, vs1, vl);
 }

-vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m1(op1, op2, vl);
+vint16m1_t test_vrem_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m1(vs2, rs1, vl);
 }

-vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m2(op1, op2, vl);
+vint16m2_t test_vrem_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m2(vs2, vs1, vl);
 }

-vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m2(op1, op2, vl);
+vint16m2_t test_vrem_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m2(vs2, rs1, vl);
 }

-vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m4(op1, op2, vl);
+vint16m4_t test_vrem_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m4(vs2, vs1, vl);
 }

-vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m4(op1, op2, vl);
+vint16m4_t test_vrem_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m4(vs2, rs1, vl);
 }

-vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m8(op1, op2, vl);
+vint16m8_t test_vrem_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m8(vs2, vs1, vl);
 }

-vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m8(op1, op2, vl);
+vint16m8_t test_vrem_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m8(vs2, rs1, vl);
 }

-vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32mf2(op1, op2, vl);
+vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32mf2(vs2, vs1, vl);
 }

-vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32mf2(op1, op2, vl);
+vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32mf2(vs2, rs1, vl);
 }

-vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m1(op1, op2, vl);
+vint32m1_t test_vrem_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m1(vs2, vs1, vl);
 }

-vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m1(op1, op2, vl);
+vint32m1_t test_vrem_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m1(vs2, rs1, vl);
 }

-vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m2(op1, op2, vl);
+vint32m2_t test_vrem_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m2(vs2, vs1, vl);
 }

-vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m2(op1, op2, vl);
+vint32m2_t test_vrem_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m2(vs2, rs1, vl);
 }

-vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m4(op1, op2, vl);
+vint32m4_t test_vrem_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m4(vs2, vs1, vl);
 }

-vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m4(op1, op2, vl);
+vint32m4_t test_vrem_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m4(vs2, rs1, vl);
 }

-vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m8(op1, op2, vl);
+vint32m8_t test_vrem_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m8(vs2, vs1, vl);
 }

-vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m8(op1, op2, vl);
+vint32m8_t test_vrem_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m8(vs2, rs1, vl);
 }

-vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m1(op1, op2, vl);
+vint64m1_t test_vrem_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m1(vs2, vs1, vl);
 }

-vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m1(op1, op2, vl);
+vint64m1_t test_vrem_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m1(vs2, rs1, vl);
 }

-vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m2(op1, op2, vl);
+vint64m2_t test_vrem_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m2(vs2, vs1, vl);
 }

-vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m2(op1, op2, vl);
+vint64m2_t test_vrem_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m2(vs2, rs1, vl);
 }

-vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m4(op1, op2, vl);
+vint64m4_t test_vrem_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m4(vs2, vs1, vl);
 }

-vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m4(op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m4(vs2, rs1, vl);
 }

-vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m8(op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m8(vs2, vs1, vl);
 }

-vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m8(op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m8(vs2, rs1, vl);
 }

-vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf8_m(vm, vs2, vs1, vl);
 }

-vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf8_m(vm, vs2, rs1, vl);
 }

-vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf4_m(vm, vs2, vs1, vl);
 }

-vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf4_m(vm, vs2, rs1, vl);
 }

-vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf2_m(vm, vs2, vs1, vl);
 }

-vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf2_m(vm, vs2, rs1, vl);
 }

-vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vrem_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m1_m(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vrem_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m1_m(vm, vs2, rs1, vl);
 }

-vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vrem_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m2_m(vm, vs2, vs1, vl);
 }

-vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m2_m(vm, vs2, rs1, vl);
 }

-vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m4_m(vm, vs2, vs1, vl);
 }

-vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m4_m(vm, vs2, rs1, vl);
 }

-vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m8_m(vm, vs2, vs1, vl);
 }

-vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m8_m(vm, vs2, rs1, vl);
 }

-vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16mf4_m(vm, vs2, vs1, vl);
 }

-vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16mf4_m(vm, vs2, rs1, vl);
 }

-vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16mf2_m(vm, vs2, vs1, vl);
 }

-vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16mf2_m(vm, vs2, rs1, vl);
 }

-vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m1_m(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vrem_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m1_m(vm, vs2, rs1, vl);
 }

-vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vrem_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m2_m(vm, vs2, vs1, vl);
 }

-vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vrem_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m2_m(vm, vs2, rs1, vl);
 }

-vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vrem_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m4_m(vm, vs2, vs1, vl);
 }

-vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vrem_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m4_m(vm, vs2, rs1, vl);
 }

-vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vrem_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m8_m(vm, vs2, vs1, vl);
 }

-vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vrem_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m8_m(vm, vs2, rs1, vl);
 }

-vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32mf2_m(vm, vs2, vs1, vl);
 }

-vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32mf2_m(vm, vs2, rs1, vl);
 }

-vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vrem_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m1_m(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vrem_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m1_m(vm, vs2, rs1, vl);
 }

-vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vrem_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m2_m(vm, vs2, vs1, vl);
 }

-vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vrem_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m2_m(vm, vs2, rs1, vl);
 }

-vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vrem_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m4_m(vm, vs2, vs1, vl);
 }

-vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vrem_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m4_m(vm, vs2, rs1, vl);
 }

-vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vrem_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m8_m(vm, vs2, vs1, vl);
 }

-vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vrem_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m8_m(vm, vs2, rs1, vl);
 }

-vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vrem_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m1_m(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vrem_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m1_m(vm, vs2, rs1, vl);
 }

-vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vrem_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m2_m(vm, vs2, vs1, vl);
 }

-vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vrem_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m2_m(vm, vs2, rs1, vl);
 }

-vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vrem_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m4_m(vm, vs2, vs1, vl);
 }

-vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m4_m(vm, vs2, rs1, vl);
 }

-vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m8_m(vm, vs2, vs1, vl);
 }

-vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vrem\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vremu.c b/auto-generated/gnu-api-tests/vremu.c
index 2c0a1570f..1a19b0515 100644
--- a/auto-generated/gnu-api-tests/vremu.c
+++ b/auto-generated/gnu-api-tests/vremu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf8(vs2, vs1, vl);
 }

-vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf8(vs2, rs1, vl);
 }

-vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf4(vs2, vs1, vl);
 }

-vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf4(vs2, rs1, vl);
 }

-vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf2(vs2, vs1, vl);
 }

-vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf2(vs2, rs1, vl);
 }

-vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m1(op1, op2, vl);
+vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m1(vs2, vs1, vl);
 }

-vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m1(op1, op2, vl);
+vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m1(vs2, rs1, vl);
 }

-vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m2(op1, op2, vl);
+vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m2(vs2, vs1, vl);
 }

-vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m2(op1, op2, vl);
+vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m2(vs2, rs1, vl);
 }

-vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m4(op1, op2, vl);
+vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m4(vs2, vs1, vl);
 }

-vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m4(op1, op2, vl);
+vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m4(vs2, rs1, vl);
 }

-vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m8(op1, op2, vl);
+vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m8(vs2, vs1, vl);
 }

-vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m8(op1, op2, vl);
+vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m8(vs2, rs1, vl);
 }

-vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf4(vs2, vs1, vl);
 }

-vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf4(vs2, rs1, vl);
 }

-vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf2(vs2, vs1, vl);
 }

-vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf2(vs2, rs1, vl);
 }

-vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m1(op1, op2, vl);
+vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m1(vs2, vs1, vl);
 }

-vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m1(op1, op2, vl);
+vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m1(vs2, rs1, vl);
 }

-vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m2(op1, op2, vl);
+vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m2(vs2, vs1, vl);
 }

-vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m2(op1, op2, vl);
+vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m2(vs2, rs1, vl);
 }

-vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m4(op1, op2, vl);
+vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m4(vs2, vs1, vl);
 }

-vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m4(op1, op2, vl);
+vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m4(vs2, rs1, vl);
 }

-vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m8(op1, op2, vl);
+vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m8(vs2, vs1, vl);
 }

-vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m8(op1, op2, vl);
+vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m8(vs2, rs1, vl);
 }

-vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32mf2(vs2, vs1, vl);
 }

-vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32mf2(vs2, rs1, vl);
 }

-vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m1(op1, op2, vl);
+vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m1(vs2, vs1, vl);
 }

-vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m1(op1, op2, vl);
+vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m1(vs2, rs1, vl);
 }

-vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m2(op1, op2, vl);
+vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m2(vs2, vs1, vl);
 }

-vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m2(op1, op2, vl);
+vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m2(vs2, rs1, vl);
 }

-vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m4(op1, op2, vl);
+vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m4(vs2, vs1, vl);
 }

-vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m4(op1, op2, vl);
+vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m4(vs2, rs1, vl);
 }

-vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m8(op1, op2, vl);
+vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m8(vs2, vs1, vl);
 }

-vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m8(op1, op2, vl);
+vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m8(vs2, rs1, vl);
 }

-vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m1(op1, op2, vl);
+vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m1(vs2, vs1, vl);
 }

-vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m1(op1, op2, vl);
+vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m1(vs2, rs1, vl);
 }

-vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m2(op1, op2, vl);
+vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m2(vs2, vs1, vl);
 }

-vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m2(op1, op2, vl);
+vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m2(vs2, rs1, vl);
 }

-vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m4(op1, op2, vl);
+vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m4(vs2, vs1, vl);
 }

-vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m4(op1, op2, vl);
+vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m4(vs2, rs1, vl);
 }

-vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m8(op1, op2, vl);
+vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m8(vs2, vs1, vl);
 }

-vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m8(op1, op2, vl);
+vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m8(vs2, rs1, vl);
 }

-vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf8_m(vm, vs2, vs1, vl);
 }

-vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf8_m(vm, vs2, rs1, vl);
 }

-vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf4_m(vm, vs2, vs1, vl);
 }

-vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf4_m(vm, vs2, rs1, vl);
 }

-vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf2_m(vm, vs2, vs1, vl);
 }

-vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf2_m(vm, vs2, rs1, vl);
 }

-vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m1_m(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m1_m(vm, vs2, rs1, vl);
 }

-vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m2_m(vm, vs2, vs1, vl);
 }

-vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m2_m(vm, vs2, rs1, vl);
 }

-vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m4_m(vm, vs2, vs1, vl);
 }

-vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m4_m(vm, vs2, rs1, vl);
 }

-vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m8_m(vm, vs2, vs1, vl);
 }

-vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m8_m(vm, vs2, rs1, vl);
 }

-vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf4_m(vm, vs2, vs1, vl);
 }

-vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf4_m(vm, vs2, rs1, vl);
 }

-vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf2_m(vm, vs2, vs1, vl);
 }

-vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf2_m(vm, vs2, rs1, vl);
 }

-vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m1_m(vm, vs2, rs1, vl);
 }

-vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m2_m(vm, vs2, vs1, vl);
 }

-vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m2_m(vm, vs2, rs1, vl);
 }

-vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m4_m(vm, vs2, vs1, vl);
 }

-vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m4_m(vm, vs2, rs1, vl);
 }

-vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m8_m(vm, vs2, vs1, vl);
 }

-vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m8_m(vm, vs2, rs1, vl);
 }

-vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32mf2_m(vm, vs2, vs1, vl);
 }

-vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32mf2_m(vm, vs2, rs1, vl);
 }

-vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m1_m(vm, vs2, rs1, vl);
 }

-vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m2_m(vm, vs2, vs1, vl);
 }

-vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m2_m(vm, vs2, rs1, vl);
 }

-vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m4_m(vm, vs2, vs1, vl);
 }

-vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m4_m(vm, vs2, rs1, vl);
 }

-vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m8_m(vm, vs2, vs1, vl);
 }

-vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m8_m(vm, vs2, rs1, vl);
 }

-vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m1_m(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m2_m(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m2_m(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m4_m(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m4_m(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m8_m(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vremu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vrgather.c b/auto-generated/gnu-api-tests/vrgather.c
index 7da3ca5be..5d848931c 100644
--- a/auto-generated/gnu-api-tests/vrgather.c
+++ b/auto-generated/gnu-api-tests/vrgather.c
@@ -6,948 +6,948 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16mf4(op1, index, vl);
+vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16mf4(vs2, vs1, vl);
 }

-vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16mf4(op1, index, vl);
+vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16mf4(vs2, vs1, vl);
 }

-vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16mf2(op1, index, vl);
+vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16mf2(vs2, vs1, vl);
 }

-vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16mf2(op1, index, vl);
+vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16mf2(vs2, vs1, vl);
 }

-vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m1(op1, index, vl);
+vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m1(vs2, vs1, vl);
 }

-vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m1(op1, index, vl);
+vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m1(vs2, vs1, vl);
 }

-vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m2(op1, index, vl);
+vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m2(vs2, vs1, vl);
 }

-vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m2(op1, index, vl);
+vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m2(vs2, vs1, vl);
 }

-vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m4(op1, index, vl);
+vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m4(vs2, vs1, vl);
 }

-vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m4(op1, index, vl);
+vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m4(vs2, vs1, vl);
 }

-vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m8(op1, index, vl);
+vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m8(vs2, vs1, vl);
 }

-vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m8(op1, index, vl);
+vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m8(vs2, vs1, vl);
 }

-vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32mf2(op1, index, vl);
+vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32mf2(vs2, vs1, vl);
 }

-vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32mf2(op1, index, vl);
+vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32mf2(vs2, vs1, vl);
 }

-vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m1(op1, index, vl);
+vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m1(vs2, vs1, vl);
 }

-vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m1(op1, index, vl);
+vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m1(vs2, vs1, vl);
 }

-vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m2(op1, index, vl);
+vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m2(vs2, vs1, vl);
 }

-vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m2(op1, index, vl);
+vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m2(vs2, vs1, vl);
 }

-vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m4(op1, index, vl);
+vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m4(vs2, vs1, vl);
 }

-vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m4(op1, index, vl);
+vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m4(vs2, vs1, vl);
 }

-vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m8(op1, index, vl);
+vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m8(vs2, vs1, vl);
 }

-vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m8(op1, index, vl);
+vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m8(vs2, vs1, vl);
 }

-vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m1(op1, index, vl);
+vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m1(vs2, vs1, vl);
 }

-vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m1(op1, index, vl);
+vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m1(vs2, vs1, vl);
 }

-vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m2(op1, index, vl);
+vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m2(vs2, vs1, vl);
 }

-vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m2(op1, index, vl);
+vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m2(vs2, vs1, vl);
 }

-vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m4(op1, index, vl);
+vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m4(vs2, vs1, vl);
 }

-vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m4(op1, index, vl);
+vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m4(vs2, vs1, vl);
 }

-vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m8(op1, index, vl);
+vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m8(vs2, vs1, vl);
 }

-vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m8(op1, index, vl);
+vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m8(vs2, vs1, vl);
 }

-vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf8(op1, index, vl);
+vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf8(vs2, vs1, vl);
 }

-vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf8(op1, index, vl);
+vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf8(vs2, vs1, vl);
 }

-vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf4(op1, index, vl);
+vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf4(vs2, vs1, vl);
 }

-vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf4(op1, index, vl);
+vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf4(vs2, vs1, vl);
 }

-vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf2(op1, index, vl);
+vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf2(vs2, vs1, vl);
 }

-vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf2(op1, index, vl);
+vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf2(vs2, vs1, vl);
 }

-vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m1(op1, index, vl);
+vint8m1_t test_vrgather_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m1(vs2, vs1, vl);
 }

-vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m1(op1, index, vl);
+vint8m1_t test_vrgather_vx_i8m1(vint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m1(vs2, vs1, vl);
 }

-vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m2(op1, index, vl);
+vint8m2_t test_vrgather_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m2(vs2, vs1, vl);
 }

-vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
-  return
__riscv_vrgather_vx_i8m2(op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2(vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m2(vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_i8m4(op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m4(op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4(vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m4(vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_i8m8(op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m8(op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8(vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m8(vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_i16mf4(op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf4(op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf4(vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i16mf2(op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf2(op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf2(vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_i16m1(op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m1(op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1(vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m1(vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_i16m2(op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m2(op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2(vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m2(vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_i16m4(op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return 
__riscv_vrgather_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m4(op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4(vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m4(vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_i16m8(op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m8(op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8(vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m8(vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i32mf2(op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32mf2(op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32mf2(vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_i32m1(op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m1(op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1(vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m1(vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_i32m2(op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m2(op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2(vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m2(vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_i32m4(op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m4(op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4(vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m4(vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_i32m8(op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m8(op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8(vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m8(vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, size_t 
vl) { - return __riscv_vrgather_vv_i64m1(op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m1(op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1(vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m1(vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_i64m2(op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m2(op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2(vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m2(vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_i64m4(op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m4(op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4(vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m4(vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_i64m8(op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m8(op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8(vint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m8(vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf8(op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf8(op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf8(vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf4(op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf4(op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf4(vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf2(op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf2(op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t vs2, 
size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf2(vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_u8m1(op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m1(op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m1(vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_u8m2(op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m2(op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m2(vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_u8m4(op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m4(op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m4(vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_u8m8(op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m8(op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m8(vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf4(op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf4(op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf4(vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf2(op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf2(op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf2(vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_u16m1(op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t 
test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m1(op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m1(vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_u16m2(op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m2(op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m2(vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_u16m4(op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m4(op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m4(vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_u16m8(op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m8(op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m8(vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u32mf2(op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32mf2(op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32mf2(vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_u32m1(op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m1(op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m1(vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_u32m2(op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m2(op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m2(vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, size_t vl) 
{ - return __riscv_vrgather_vv_u32m4(op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m4(op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m4(vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_u32m8(op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m8(op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m8(vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_u64m1(op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m1(op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m1(vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_u64m2(op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m2(op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m2(vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_u64m4(op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m4(op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m4(vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_u64m8(op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m8(op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m8(vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf4_m(mask, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t index, size_t 
vl) { - return __riscv_vrgather_vx_f16mf4_m(mask, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf2_m(mask, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16mf2_m(mask, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_f16m1_m(mask, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m1_m(mask, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_f16m2_m(mask, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m2_m(mask, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_f16m4_m(mask, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m4_m(mask, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_f16m8_m(mask, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m8_m(mask, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m8_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return 
__riscv_vrgather_vv_f32mf2_m(mask, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32mf2_m(mask, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_f32m1_m(mask, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m1_m(mask, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_f32m2_m(mask, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m2_m(mask, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_f32m4_m(mask, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m4_m(mask, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_f32m8_m(mask, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m8_m(mask, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m8_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_f64m1_m(mask, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m1_m(mask, op1, 
index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_f64m2_m(mask, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m2_m(mask, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_f64m4_m(mask, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m4_m(mask, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_f64m8_m(mask, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m8_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m8_m(mask, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf8_m(mask, op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf8_m(mask, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf4_m(mask, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf4_m(mask, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf2_m(mask, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf2_m(mask, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf2_m(vm, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_i8m1_m(mask, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m1_m(mask, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m1_m(vm, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_i8m2_m(mask, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m2_m(mask, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m2_m(vm, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_i8m4_m(mask, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m4_m(mask, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m4_m(vm, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_i8m8_m(mask, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m8_m(mask, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m8_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_i16mf4_m(mask, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf4_m(mask, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return 
__riscv_vrgather_vv_i16mf2_m(mask, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf2_m(mask, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf2_m(vm, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_i16m1_m(mask, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m1_m(mask, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m1_m(vm, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_i16m2_m(mask, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m2_m(mask, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m2_m(vm, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_i16m4_m(mask, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m4_m(mask, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m4_m(vm, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_i16m8_m(mask, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m8_m(mask, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m8_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i32mf2_m(mask, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32mf2_m(mask, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t vm, vint32mf2_t 
vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32mf2_m(vm, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_i32m1_m(mask, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m1_m(mask, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m1_m(vm, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_i32m2_m(mask, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m2_m(mask, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m2_m(vm, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_i32m4_m(mask, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m4_m(mask, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m4_m(vm, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_i32m8_m(mask, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m8_m(mask, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m8_m(vm, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_i64m1_m(mask, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m1_m(mask, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m1_m(vm, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_i64m2_m(mask, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t 
mask, vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m2_m(mask, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m2_m(vm, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_i64m4_m(mask, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m4_m(mask, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m4_m(vm, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_i64m8_m(mask, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m8_m(mask, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf8_m(mask, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf8_m(mask, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf4_m(mask, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf4_m(mask, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf2_m(mask, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf2_m(mask, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf2_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_u8m1_m(mask, op1, index, vl); 
+vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m1_m(mask, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m1_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_u8m2_m(mask, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m2_m(mask, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m2_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_u8m4_m(mask, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m4_m(mask, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m4_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_u8m8_m(mask, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m8_m(mask, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m8_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf4_m(mask, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf4_m(mask, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf2_m(mask, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf2_m(mask, op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf2_m(vm, 
vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_u16m1_m(mask, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m1_m(mask, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m1_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_u16m2_m(mask, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m2_m(mask, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m2_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_u16m4_m(mask, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m4_m(mask, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m4_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_u16m8_m(mask, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m8_m(mask, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m8_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u32mf2_m(mask, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32mf2_m(mask, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32mf2_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_u32m1_m(mask, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, 
vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m1_m(mask, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m1_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_u32m2_m(mask, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m2_m(mask, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m2_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_u32m4_m(mask, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m4_m(mask, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m4_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_u32m8_m(mask, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m8_m(mask, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m8_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_u64m1_m(mask, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m1_m(mask, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m1_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_u64m2_m(mask, op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m2_m(mask, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m2_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_u64m4_m(mask, op1, 
index, vl);
+vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m4_m(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m4_m(mask, op1, index, vl);
+vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m4_m(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u64m8_m(mask, op1, index, vl);
+vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m8_m(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m8_m(mask, op1, index, vl);
+vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m8_m(vm, vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgather\.[ivxfswum.]+\s+} 236 } } */
diff --git a/auto-generated/gnu-api-tests/vrgatherei16.c b/auto-generated/gnu-api-tests/vrgatherei16.c
index 602d27c1d..39b8bcae0 100644
--- a/auto-generated/gnu-api-tests/vrgatherei16.c
+++ b/auto-generated/gnu-api-tests/vrgatherei16.c
@@ -6,460 +6,460 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf4(vs2, vs1, vl);
 }

-vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf2(vs2, vs1, vl);
 }

-vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m1(op1, op2, vl);
+vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m1(vs2, vs1, vl);
 }

-vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m2(op1, op2, vl);
+vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m2(vs2, vs1, vl);
 }

-vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m4(op1, op2, vl);
+vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m4(vs2, vs1, vl);
 }

-vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m8(op1, op2, vl);
+vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m8(vs2, vs1, vl);
 }

-vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32mf2(op1, op2, vl);
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t vs2,
vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32mf2(vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m1(vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m2(vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m4(vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m8(vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m1(op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m1(vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m2(op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m2(vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m4(vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m8(op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m8(vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf8(vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf4(vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf2(vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m1(op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m1(vs2, vs1, vl); } -vint8m2_t 
test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m2(op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m2(vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m4(op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m4(vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf4(vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf2(vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m1(op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m1(vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m2(op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m2(vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m4(op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m4(vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m8(op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m8(vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32mf2(vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m1(op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m1(vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m2(op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m2(vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m4(op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m4(vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m8(op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t 
vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m8(vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m1(op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m1(vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m2(op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m2(vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m4(op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m4(vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m8(op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m8(vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf8(op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf8(vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf4(op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf4(vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf2(op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf2(vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m1(op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m1(vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m2(op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m2(vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m4(op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m4(vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf4(vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf2(vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, 
vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m1(vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m2(vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m4(vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m8(vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32mf2(vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m1(vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m2(vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m4(vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m8(vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m1(vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m2(vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m4(vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m8(op1, op2, vl); +vuint64m8_t 
test_vrgatherei16_vv_u64m8(vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m8(vs2, vs1, vl); } -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16mf4_m(mask, op1, op2, vl); +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16mf4_m(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16mf2_m(mask, op1, op2, vl); +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16mf2_m(vm, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m1_m(vm, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m2_m(vm, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m4_m(vm, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m8_m(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32mf2_m(mask, op1, op2, vl); +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32mf2_m(vm, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m1_m(vm, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m2_m(vm, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m4_m(vm, vs2, vs1, vl); } -vfloat32m8_t 
test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m8_m(vm, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m1_m(vm, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m2_m(vm, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m4_m(vm, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + 
return __riscv_vrgatherei16_vv_i8m4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m8_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t 
test_vrgatherei16_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m8_m(vm, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return 
__riscv_vrgatherei16_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m8_m(vm, vs2, vs1, vl); 
 }

-vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m2_m(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m4_m(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m8_m(vm, vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgatherei16\.[ivxfswum.]+\s+} 114 } } */
diff --git a/auto-generated/gnu-api-tests/vrsub.c b/auto-generated/gnu-api-tests/vrsub.c
index 787c49537..be5c4a32f 100644
--- a/auto-generated/gnu-api-tests/vrsub.c
+++ b/auto-generated/gnu-api-tests/vrsub.c
@@ -6,356 +6,356 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub_vx_i8mf8(op1, op2, vl);
+vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_i8mf8(vs2, rs1, vl);
 }

-vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub_vx_i8mf4(op1, op2, vl);
+vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_i8mf4(vs2, rs1, vl);
 }

-vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub_vx_i8mf2(op1, op2, vl);
+vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_i8mf2(vs2, rs1, vl);
 }

-vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub_vx_i8m1(op1, op2, vl);
+vint8m1_t test_vrsub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_i8m1(vs2, rs1, vl);
 }

-vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub_vx_i8m2(op1, op2, vl);
+vint8m2_t test_vrsub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_i8m2(vs2, rs1, vl);
 }

-vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub_vx_i8m4(op1, op2, vl);
+vint8m4_t test_vrsub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_i8m4(vs2, rs1, vl);
 }

-vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub_vx_i8m8(op1, op2, vl);
+vint8m8_t test_vrsub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_i8m8(vs2, rs1, vl);
 }

-vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return
__riscv_vrsub_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m1(op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m2(op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m4(op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m8(op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m1(op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m2(op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m4(op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m8(op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m1(op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m2(op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m4(op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m8(op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + 
return __riscv_vrsub_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf8(op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf4(op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf2(op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m1(op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m2(op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m4(op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t 
test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m8(vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m4_m(mask, op1, op2, 
vl); +vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t vm, 
vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m8_m(vm, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vrsub_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, 
uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vrsub\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vsadd.c b/auto-generated/gnu-api-tests/vsadd.c index 2dda66fec..4ee3faa94 100644 --- a/auto-generated/gnu-api-tests/vsadd.c +++ b/auto-generated/gnu-api-tests/vsadd.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1(op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1(op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2(op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2(op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - 
return __riscv_vsadd_vv_i8m4(op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4(op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8(op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8(op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1(op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1(op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2(op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2(op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4(op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4(op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8(op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8(vint16m8_t vs2, 
vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8(op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1(op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1(op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2(op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2(op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4(op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4(op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8(op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8(op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1(op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1(op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2(op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m2(vs2, vs1, vl); } 
-vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2(op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4(op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4(op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8(op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8(op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m8(vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m1_m(vm, vs2, rs1, 
vl); } -vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t 
mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, 
vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsadd\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vsaddu.c b/auto-generated/gnu-api-tests/vsaddu.c index 309efcec7..be2d90ac2 100644 --- a/auto-generated/gnu-api-tests/vsaddu.c +++ b/auto-generated/gnu-api-tests/vsaddu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8(op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8(op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4(op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4(op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2(op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2(op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1(op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1(op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2(op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2(op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4(op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4(op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return 
__riscv_vsaddu_vv_u8m8(op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8(op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t 
test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + 
return __riscv_vsaddu_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_m(mask, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, 
vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m1_m(vm, vs2, rs1, 
vl); } -vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t 
test_vsaddu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, 
uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsaddu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vsbc.c b/auto-generated/gnu-api-tests/vsbc.c index b538257b4..3b3f08aa0 100644 --- a/auto-generated/gnu-api-tests/vsbc.c +++ b/auto-generated/gnu-api-tests/vsbc.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8mf8(op1, op2, borrowin, vl); +vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8mf8(vs2, vs1, v0, vl); } -vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8mf8(op1, op2, borrowin, vl); +vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8mf8(vs2, rs1, v0, vl); } -vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8mf4(op1, op2, borrowin, vl); +vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8mf4(vs2, vs1, v0, vl); } -vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8mf4(op1, op2, borrowin, vl); +vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8mf4(vs2, rs1, v0, vl); } -vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8mf2(op1, op2, borrowin, vl); +vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8mf2(vs2, vs1, v0, vl); } -vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8mf2(op1, op2, borrowin, vl); +vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8mf2(vs2, rs1, v0, vl); } -vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m1(op1, op2, borrowin, vl); +vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m1(vs2, vs1, v0, vl); } -vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m1(op1, op2, borrowin, vl); +vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8m1(vs2, rs1, v0, vl); } -vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m2(op1, op2, borrowin, vl); +vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m2(vs2, vs1, v0, vl); } -vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m2(op1, op2, borrowin, vl); +vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return 
__riscv_vsbc_vxm_i8m2(vs2, rs1, v0, vl); } -vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m4(op1, op2, borrowin, vl); +vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m4(vs2, vs1, v0, vl); } -vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m4(op1, op2, borrowin, vl); +vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8m4(vs2, rs1, v0, vl); } -vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m8(op1, op2, borrowin, vl); +vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m8(vs2, vs1, v0, vl); } -vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m8(op1, op2, borrowin, vl); +vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8m8(vs2, rs1, v0, vl); } -vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16mf4(op1, op2, borrowin, vl); +vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16mf4(vs2, vs1, v0, vl); } -vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16mf4(op1, op2, borrowin, vl); +vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16mf4(vs2, rs1, v0, vl); } -vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16mf2(op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16mf2(vs2, vs1, v0, vl); } -vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16mf2(op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16mf2(vs2, rs1, v0, vl); } -vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m1(op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m1(vs2, vs1, v0, vl); } -vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m1(op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m1(vs2, rs1, v0, vl); } -vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m2(op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m2(vs2, vs1, v0, vl); } -vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m2(op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m2(vs2, 
rs1, v0, vl); } -vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m4(op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m4(vs2, vs1, v0, vl); } -vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m4(op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m4(vs2, rs1, v0, vl); } -vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m8(op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m8(vs2, vs1, v0, vl); } -vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m8(op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m8(vs2, rs1, v0, vl); } -vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32mf2(op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32mf2(vs2, vs1, v0, vl); } -vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32mf2(op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32mf2(vs2, rs1, v0, vl); } -vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m1(op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m1(vs2, vs1, v0, vl); } -vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m1(op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32m1(vs2, rs1, v0, vl); } -vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m2(op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m2(vs2, vs1, v0, vl); } -vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m2(op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32m2(vs2, rs1, v0, vl); } -vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m4(op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m4(vs2, vs1, v0, vl); } -vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m4(op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32m4(vs2, rs1, v0, 
vl); } -vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m8(op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m8(vs2, vs1, v0, vl); } -vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m8(op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32m8(vs2, rs1, v0, vl); } -vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m1(op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m1(vs2, vs1, v0, vl); } -vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m1(op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m1(vs2, rs1, v0, vl); } -vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m2(op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m2(vs2, vs1, v0, vl); } -vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m2(op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m2(vs2, rs1, v0, vl); } -vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m4(op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m4(vs2, vs1, v0, vl); } -vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m4(op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m4(vs2, rs1, v0, vl); } -vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m8(op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m8(vs2, vs1, v0, vl); } -vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m8(op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m8(vs2, rs1, v0, vl); } -vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8mf8(op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8mf8(vs2, vs1, v0, vl); } -vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8mf8(op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8mf8(vs2, rs1, v0, vl); } 
-vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8mf4(op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8mf4(vs2, vs1, v0, vl); } -vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8mf4(op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8mf4(vs2, rs1, v0, vl); } -vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8mf2(op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8mf2(vs2, vs1, v0, vl); } -vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8mf2(op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8mf2(vs2, rs1, v0, vl); } -vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m1(op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m1(vs2, vs1, v0, vl); } -vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m1(op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m1(vs2, rs1, v0, vl); } -vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m2(op1, op2, borrowin, vl); +vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m2(vs2, vs1, v0, vl); } -vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m2(op1, op2, borrowin, vl); +vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m2(vs2, rs1, v0, vl); } -vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m4(op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m4(vs2, vs1, v0, vl); } -vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m4(op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m4(vs2, rs1, v0, vl); } -vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m8(op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m8(vs2, vs1, v0, vl); } -vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m8(op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m8(vs2, rs1, v0, vl); } -vuint16mf4_t 
test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16mf4(op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16mf4(vs2, vs1, v0, vl); } -vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16mf4(op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_u16mf4(vs2, rs1, v0, vl); } -vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16mf2(op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16mf2(vs2, vs1, v0, vl); } -vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16mf2(op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_u16mf2(vs2, rs1, v0, vl); } -vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16m1(op1, op2, borrowin, vl); +vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16m1(vs2, vs1, v0, vl); } -vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16m1(op1, op2, borrowin, vl); +vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_u16m1(vs2, rs1, v0, vl); } -vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16m2(op1, op2, borrowin, vl); +vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16m2(vs2, vs1, v0, vl); } -vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16m2(op1, op2, borrowin, vl); +vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_u16m2(vs2, rs1, v0, vl); } -vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16m4(op1, op2, borrowin, vl); +vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16m4(vs2, vs1, v0, vl); } -vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16m4(op1, op2, borrowin, vl); +vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_u16m4(vs2, rs1, v0, vl); } -vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16m8(op1, op2, borrowin, vl); +vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16m8(vs2, vs1, v0, vl); } -vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16m8(op1, op2, borrowin, vl); +vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t vs2, uint16_t rs1, vbool2_t 
v0, size_t vl) { + return __riscv_vsbc_vxm_u16m8(vs2, rs1, v0, vl); } -vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u32mf2(op1, op2, borrowin, vl); +vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_u32mf2(vs2, vs1, v0, vl); } -vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u32mf2(op1, op2, borrowin, vl); +vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_u32mf2(vs2, rs1, v0, vl); } -vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u32m1(op1, op2, borrowin, vl); +vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_u32m1(vs2, vs1, v0, vl); } -vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u32m1(op1, op2, borrowin, vl); +vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_u32m1(vs2, rs1, v0, vl); } -vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u32m2(op1, op2, borrowin, vl); +vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_u32m2(vs2, vs1, v0, vl); } -vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u32m2(op1, op2, borrowin, vl); +vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_u32m2(vs2, rs1, v0, vl); } -vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u32m4(op1, op2, borrowin, vl); +vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_u32m4(vs2, vs1, v0, vl); } -vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u32m4(op1, op2, borrowin, vl); +vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_u32m4(vs2, rs1, v0, vl); } -vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u32m8(op1, op2, borrowin, vl); +vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_u32m8(vs2, vs1, v0, vl); } -vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u32m8(op1, op2, borrowin, vl); +vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_u32m8(vs2, rs1, v0, vl); } -vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u64m1(op1, op2, borrowin, vl); +vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_u64m1(vs2, vs1, v0, vl); } -vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u64m1(op1, op2, borrowin, vl); 
+vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_u64m1(vs2, rs1, v0, vl); } -vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u64m2(op1, op2, borrowin, vl); +vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_u64m2(vs2, vs1, v0, vl); } -vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u64m2(op1, op2, borrowin, vl); +vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_u64m2(vs2, rs1, v0, vl); } -vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u64m4(op1, op2, borrowin, vl); +vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_u64m4(vs2, vs1, v0, vl); } -vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u64m4(op1, op2, borrowin, vl); +vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_u64m4(vs2, rs1, v0, vl); } -vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u64m8(op1, op2, borrowin, vl); +vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_u64m8(vs2, vs1, v0, vl); } -vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u64m8(op1, op2, borrowin, vl); +vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_u64m8(vs2, rs1, v0, vl); } /* { dg-final { scan-assembler-times {vsbc\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vse16.c b/auto-generated/gnu-api-tests/vse16.c index 5488f4375..6198ddffb 100644 --- a/auto-generated/gnu-api-tests/vse16.c +++ b/auto-generated/gnu-api-tests/vse16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse16_v_f16mf4(float16_t *base, vfloat16mf4_t value, size_t vl) { - return __riscv_vse16_v_f16mf4(base, value, vl); +void test_vse16_v_f16mf4(float16_t *rs1, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_f16mf4(rs1, vs3, vl); } -void test_vse16_v_f16mf2(float16_t *base, vfloat16mf2_t value, size_t vl) { - return __riscv_vse16_v_f16mf2(base, value, vl); +void test_vse16_v_f16mf2(float16_t *rs1, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_f16mf2(rs1, vs3, vl); } -void test_vse16_v_f16m1(float16_t *base, vfloat16m1_t value, size_t vl) { - return __riscv_vse16_v_f16m1(base, value, vl); +void test_vse16_v_f16m1(float16_t *rs1, vfloat16m1_t vs3, size_t vl) { + return __riscv_vse16_v_f16m1(rs1, vs3, vl); } -void test_vse16_v_f16m2(float16_t *base, vfloat16m2_t value, size_t vl) { - return __riscv_vse16_v_f16m2(base, value, vl); +void test_vse16_v_f16m2(float16_t *rs1, vfloat16m2_t vs3, size_t vl) { + return __riscv_vse16_v_f16m2(rs1, vs3, vl); } -void test_vse16_v_f16m4(float16_t *base, vfloat16m4_t value, size_t vl) { - return __riscv_vse16_v_f16m4(base, value, vl); +void test_vse16_v_f16m4(float16_t *rs1, vfloat16m4_t vs3, size_t vl) { + return __riscv_vse16_v_f16m4(rs1, vs3, vl); 
} -void test_vse16_v_f16m8(float16_t *base, vfloat16m8_t value, size_t vl) { - return __riscv_vse16_v_f16m8(base, value, vl); +void test_vse16_v_f16m8(float16_t *rs1, vfloat16m8_t vs3, size_t vl) { + return __riscv_vse16_v_f16m8(rs1, vs3, vl); } -void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { - return __riscv_vse16_v_i16mf4(base, value, vl); +void test_vse16_v_i16mf4(int16_t *rs1, vint16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_i16mf4(rs1, vs3, vl); } -void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { - return __riscv_vse16_v_i16mf2(base, value, vl); +void test_vse16_v_i16mf2(int16_t *rs1, vint16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_i16mf2(rs1, vs3, vl); } -void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { - return __riscv_vse16_v_i16m1(base, value, vl); +void test_vse16_v_i16m1(int16_t *rs1, vint16m1_t vs3, size_t vl) { + return __riscv_vse16_v_i16m1(rs1, vs3, vl); } -void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { - return __riscv_vse16_v_i16m2(base, value, vl); +void test_vse16_v_i16m2(int16_t *rs1, vint16m2_t vs3, size_t vl) { + return __riscv_vse16_v_i16m2(rs1, vs3, vl); } -void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { - return __riscv_vse16_v_i16m4(base, value, vl); +void test_vse16_v_i16m4(int16_t *rs1, vint16m4_t vs3, size_t vl) { + return __riscv_vse16_v_i16m4(rs1, vs3, vl); } -void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { - return __riscv_vse16_v_i16m8(base, value, vl); +void test_vse16_v_i16m8(int16_t *rs1, vint16m8_t vs3, size_t vl) { + return __riscv_vse16_v_i16m8(rs1, vs3, vl); } -void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { - return __riscv_vse16_v_u16mf4(base, value, vl); +void test_vse16_v_u16mf4(uint16_t *rs1, vuint16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_u16mf4(rs1, vs3, vl); } -void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { - return __riscv_vse16_v_u16mf2(base, value, vl); +void test_vse16_v_u16mf2(uint16_t *rs1, vuint16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_u16mf2(rs1, vs3, vl); } -void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { - return __riscv_vse16_v_u16m1(base, value, vl); +void test_vse16_v_u16m1(uint16_t *rs1, vuint16m1_t vs3, size_t vl) { + return __riscv_vse16_v_u16m1(rs1, vs3, vl); } -void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { - return __riscv_vse16_v_u16m2(base, value, vl); +void test_vse16_v_u16m2(uint16_t *rs1, vuint16m2_t vs3, size_t vl) { + return __riscv_vse16_v_u16m2(rs1, vs3, vl); } -void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { - return __riscv_vse16_v_u16m4(base, value, vl); +void test_vse16_v_u16m4(uint16_t *rs1, vuint16m4_t vs3, size_t vl) { + return __riscv_vse16_v_u16m4(rs1, vs3, vl); } -void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { - return __riscv_vse16_v_u16m8(base, value, vl); +void test_vse16_v_u16m8(uint16_t *rs1, vuint16m8_t vs3, size_t vl) { + return __riscv_vse16_v_u16m8(rs1, vs3, vl); } -void test_vse16_v_f16mf4_m(vbool64_t mask, float16_t *base, vfloat16mf4_t value, size_t vl) { - return __riscv_vse16_v_f16mf4_m(mask, base, value, vl); +void test_vse16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_f16mf4_m(vm, rs1, vs3, vl); } -void test_vse16_v_f16mf2_m(vbool32_t mask, float16_t *base, vfloat16mf2_t value, size_t vl) { - return 
__riscv_vse16_v_f16mf2_m(mask, base, value, vl); +void test_vse16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_f16mf2_m(vm, rs1, vs3, vl); } -void test_vse16_v_f16m1_m(vbool16_t mask, float16_t *base, vfloat16m1_t value, size_t vl) { - return __riscv_vse16_v_f16m1_m(mask, base, value, vl); +void test_vse16_v_f16m1_m(vbool16_t vm, float16_t *rs1, vfloat16m1_t vs3, size_t vl) { + return __riscv_vse16_v_f16m1_m(vm, rs1, vs3, vl); } -void test_vse16_v_f16m2_m(vbool8_t mask, float16_t *base, vfloat16m2_t value, size_t vl) { - return __riscv_vse16_v_f16m2_m(mask, base, value, vl); +void test_vse16_v_f16m2_m(vbool8_t vm, float16_t *rs1, vfloat16m2_t vs3, size_t vl) { + return __riscv_vse16_v_f16m2_m(vm, rs1, vs3, vl); } -void test_vse16_v_f16m4_m(vbool4_t mask, float16_t *base, vfloat16m4_t value, size_t vl) { - return __riscv_vse16_v_f16m4_m(mask, base, value, vl); +void test_vse16_v_f16m4_m(vbool4_t vm, float16_t *rs1, vfloat16m4_t vs3, size_t vl) { + return __riscv_vse16_v_f16m4_m(vm, rs1, vs3, vl); } -void test_vse16_v_f16m8_m(vbool2_t mask, float16_t *base, vfloat16m8_t value, size_t vl) { - return __riscv_vse16_v_f16m8_m(mask, base, value, vl); +void test_vse16_v_f16m8_m(vbool2_t vm, float16_t *rs1, vfloat16m8_t vs3, size_t vl) { + return __riscv_vse16_v_f16m8_m(vm, rs1, vs3, vl); } -void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { - return __riscv_vse16_v_i16mf4_m(mask, base, value, vl); +void test_vse16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vint16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_i16mf4_m(vm, rs1, vs3, vl); } -void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { - return __riscv_vse16_v_i16mf2_m(mask, base, value, vl); +void test_vse16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vint16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_i16mf2_m(vm, rs1, vs3, vl); } -void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { - return __riscv_vse16_v_i16m1_m(mask, base, value, vl); +void test_vse16_v_i16m1_m(vbool16_t vm, int16_t *rs1, vint16m1_t vs3, size_t vl) { + return __riscv_vse16_v_i16m1_m(vm, rs1, vs3, vl); } -void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { - return __riscv_vse16_v_i16m2_m(mask, base, value, vl); +void test_vse16_v_i16m2_m(vbool8_t vm, int16_t *rs1, vint16m2_t vs3, size_t vl) { + return __riscv_vse16_v_i16m2_m(vm, rs1, vs3, vl); } -void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { - return __riscv_vse16_v_i16m4_m(mask, base, value, vl); +void test_vse16_v_i16m4_m(vbool4_t vm, int16_t *rs1, vint16m4_t vs3, size_t vl) { + return __riscv_vse16_v_i16m4_m(vm, rs1, vs3, vl); } -void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { - return __riscv_vse16_v_i16m8_m(mask, base, value, vl); +void test_vse16_v_i16m8_m(vbool2_t vm, int16_t *rs1, vint16m8_t vs3, size_t vl) { + return __riscv_vse16_v_i16m8_m(vm, rs1, vs3, vl); } -void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { - return __riscv_vse16_v_u16mf4_m(mask, base, value, vl); +void test_vse16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_u16mf4_m(vm, rs1, vs3, vl); } -void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { - return __riscv_vse16_v_u16mf2_m(mask, base, value, vl); +void test_vse16_v_u16mf2_m(vbool32_t vm, 
uint16_t *rs1, vuint16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_u16mf2_m(vm, rs1, vs3, vl); } -void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { - return __riscv_vse16_v_u16m1_m(mask, base, value, vl); +void test_vse16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs3, size_t vl) { + return __riscv_vse16_v_u16m1_m(vm, rs1, vs3, vl); } -void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { - return __riscv_vse16_v_u16m2_m(mask, base, value, vl); +void test_vse16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs3, size_t vl) { + return __riscv_vse16_v_u16m2_m(vm, rs1, vs3, vl); } -void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { - return __riscv_vse16_v_u16m4_m(mask, base, value, vl); +void test_vse16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t vs3, size_t vl) { + return __riscv_vse16_v_u16m4_m(vm, rs1, vs3, vl); } -void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { - return __riscv_vse16_v_u16m8_m(mask, base, value, vl); +void test_vse16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint16m8_t vs3, size_t vl) { + return __riscv_vse16_v_u16m8_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/gnu-api-tests/vse32.c b/auto-generated/gnu-api-tests/vse32.c index 4457aaa8b..054dfde29 100644 --- a/auto-generated/gnu-api-tests/vse32.c +++ b/auto-generated/gnu-api-tests/vse32.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse32_v_f32mf2(float32_t *base, vfloat32mf2_t value, size_t vl) { - return __riscv_vse32_v_f32mf2(base, value, vl); +void test_vse32_v_f32mf2(float32_t *rs1, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vse32_v_f32mf2(rs1, vs3, vl); } -void test_vse32_v_f32m1(float32_t *base, vfloat32m1_t value, size_t vl) { - return __riscv_vse32_v_f32m1(base, value, vl); +void test_vse32_v_f32m1(float32_t *rs1, vfloat32m1_t vs3, size_t vl) { + return __riscv_vse32_v_f32m1(rs1, vs3, vl); } -void test_vse32_v_f32m2(float32_t *base, vfloat32m2_t value, size_t vl) { - return __riscv_vse32_v_f32m2(base, value, vl); +void test_vse32_v_f32m2(float32_t *rs1, vfloat32m2_t vs3, size_t vl) { + return __riscv_vse32_v_f32m2(rs1, vs3, vl); } -void test_vse32_v_f32m4(float32_t *base, vfloat32m4_t value, size_t vl) { - return __riscv_vse32_v_f32m4(base, value, vl); +void test_vse32_v_f32m4(float32_t *rs1, vfloat32m4_t vs3, size_t vl) { + return __riscv_vse32_v_f32m4(rs1, vs3, vl); } -void test_vse32_v_f32m8(float32_t *base, vfloat32m8_t value, size_t vl) { - return __riscv_vse32_v_f32m8(base, value, vl); +void test_vse32_v_f32m8(float32_t *rs1, vfloat32m8_t vs3, size_t vl) { + return __riscv_vse32_v_f32m8(rs1, vs3, vl); } -void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { - return __riscv_vse32_v_i32mf2(base, value, vl); +void test_vse32_v_i32mf2(int32_t *rs1, vint32mf2_t vs3, size_t vl) { + return __riscv_vse32_v_i32mf2(rs1, vs3, vl); } -void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { - return __riscv_vse32_v_i32m1(base, value, vl); +void test_vse32_v_i32m1(int32_t *rs1, vint32m1_t vs3, size_t vl) { + return __riscv_vse32_v_i32m1(rs1, vs3, vl); } -void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { - return __riscv_vse32_v_i32m2(base, value, vl); +void 
test_vse32_v_i32m2(int32_t *rs1, vint32m2_t vs3, size_t vl) { + return __riscv_vse32_v_i32m2(rs1, vs3, vl); } -void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { - return __riscv_vse32_v_i32m4(base, value, vl); +void test_vse32_v_i32m4(int32_t *rs1, vint32m4_t vs3, size_t vl) { + return __riscv_vse32_v_i32m4(rs1, vs3, vl); } -void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { - return __riscv_vse32_v_i32m8(base, value, vl); +void test_vse32_v_i32m8(int32_t *rs1, vint32m8_t vs3, size_t vl) { + return __riscv_vse32_v_i32m8(rs1, vs3, vl); } -void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { - return __riscv_vse32_v_u32mf2(base, value, vl); +void test_vse32_v_u32mf2(uint32_t *rs1, vuint32mf2_t vs3, size_t vl) { + return __riscv_vse32_v_u32mf2(rs1, vs3, vl); } -void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { - return __riscv_vse32_v_u32m1(base, value, vl); +void test_vse32_v_u32m1(uint32_t *rs1, vuint32m1_t vs3, size_t vl) { + return __riscv_vse32_v_u32m1(rs1, vs3, vl); } -void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { - return __riscv_vse32_v_u32m2(base, value, vl); +void test_vse32_v_u32m2(uint32_t *rs1, vuint32m2_t vs3, size_t vl) { + return __riscv_vse32_v_u32m2(rs1, vs3, vl); } -void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { - return __riscv_vse32_v_u32m4(base, value, vl); +void test_vse32_v_u32m4(uint32_t *rs1, vuint32m4_t vs3, size_t vl) { + return __riscv_vse32_v_u32m4(rs1, vs3, vl); } -void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { - return __riscv_vse32_v_u32m8(base, value, vl); +void test_vse32_v_u32m8(uint32_t *rs1, vuint32m8_t vs3, size_t vl) { + return __riscv_vse32_v_u32m8(rs1, vs3, vl); } -void test_vse32_v_f32mf2_m(vbool64_t mask, float32_t *base, vfloat32mf2_t value, size_t vl) { - return __riscv_vse32_v_f32mf2_m(mask, base, value, vl); +void test_vse32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vse32_v_f32mf2_m(vm, rs1, vs3, vl); } -void test_vse32_v_f32m1_m(vbool32_t mask, float32_t *base, vfloat32m1_t value, size_t vl) { - return __riscv_vse32_v_f32m1_m(mask, base, value, vl); +void test_vse32_v_f32m1_m(vbool32_t vm, float32_t *rs1, vfloat32m1_t vs3, size_t vl) { + return __riscv_vse32_v_f32m1_m(vm, rs1, vs3, vl); } -void test_vse32_v_f32m2_m(vbool16_t mask, float32_t *base, vfloat32m2_t value, size_t vl) { - return __riscv_vse32_v_f32m2_m(mask, base, value, vl); +void test_vse32_v_f32m2_m(vbool16_t vm, float32_t *rs1, vfloat32m2_t vs3, size_t vl) { + return __riscv_vse32_v_f32m2_m(vm, rs1, vs3, vl); } -void test_vse32_v_f32m4_m(vbool8_t mask, float32_t *base, vfloat32m4_t value, size_t vl) { - return __riscv_vse32_v_f32m4_m(mask, base, value, vl); +void test_vse32_v_f32m4_m(vbool8_t vm, float32_t *rs1, vfloat32m4_t vs3, size_t vl) { + return __riscv_vse32_v_f32m4_m(vm, rs1, vs3, vl); } -void test_vse32_v_f32m8_m(vbool4_t mask, float32_t *base, vfloat32m8_t value, size_t vl) { - return __riscv_vse32_v_f32m8_m(mask, base, value, vl); +void test_vse32_v_f32m8_m(vbool4_t vm, float32_t *rs1, vfloat32m8_t vs3, size_t vl) { + return __riscv_vse32_v_f32m8_m(vm, rs1, vs3, vl); } -void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { - return __riscv_vse32_v_i32mf2_m(mask, base, value, vl); +void test_vse32_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vint32mf2_t vs3, size_t vl) { + return __riscv_vse32_v_i32mf2_m(vm, rs1, vs3, vl); } -void 
test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { - return __riscv_vse32_v_i32m1_m(mask, base, value, vl); +void test_vse32_v_i32m1_m(vbool32_t vm, int32_t *rs1, vint32m1_t vs3, size_t vl) { + return __riscv_vse32_v_i32m1_m(vm, rs1, vs3, vl); } -void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { - return __riscv_vse32_v_i32m2_m(mask, base, value, vl); +void test_vse32_v_i32m2_m(vbool16_t vm, int32_t *rs1, vint32m2_t vs3, size_t vl) { + return __riscv_vse32_v_i32m2_m(vm, rs1, vs3, vl); } -void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { - return __riscv_vse32_v_i32m4_m(mask, base, value, vl); +void test_vse32_v_i32m4_m(vbool8_t vm, int32_t *rs1, vint32m4_t vs3, size_t vl) { + return __riscv_vse32_v_i32m4_m(vm, rs1, vs3, vl); } -void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { - return __riscv_vse32_v_i32m8_m(mask, base, value, vl); +void test_vse32_v_i32m8_m(vbool4_t vm, int32_t *rs1, vint32m8_t vs3, size_t vl) { + return __riscv_vse32_v_i32m8_m(vm, rs1, vs3, vl); } -void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { - return __riscv_vse32_v_u32mf2_m(mask, base, value, vl); +void test_vse32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs3, size_t vl) { + return __riscv_vse32_v_u32mf2_m(vm, rs1, vs3, vl); } -void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { - return __riscv_vse32_v_u32m1_m(mask, base, value, vl); +void test_vse32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs3, size_t vl) { + return __riscv_vse32_v_u32m1_m(vm, rs1, vs3, vl); } -void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) { - return __riscv_vse32_v_u32m2_m(mask, base, value, vl); +void test_vse32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs3, size_t vl) { + return __riscv_vse32_v_u32m2_m(vm, rs1, vs3, vl); } -void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { - return __riscv_vse32_v_u32m4_m(mask, base, value, vl); +void test_vse32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t vs3, size_t vl) { + return __riscv_vse32_v_u32m4_m(vm, rs1, vs3, vl); } -void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) { - return __riscv_vse32_v_u32m8_m(mask, base, value, vl); +void test_vse32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint32m8_t vs3, size_t vl) { + return __riscv_vse32_v_u32m8_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse32\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vse64.c b/auto-generated/gnu-api-tests/vse64.c index f8e155b1c..d85ea0930 100644 --- a/auto-generated/gnu-api-tests/vse64.c +++ b/auto-generated/gnu-api-tests/vse64.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse64_v_f64m1(float64_t *base, vfloat64m1_t value, size_t vl) { - return __riscv_vse64_v_f64m1(base, value, vl); +void test_vse64_v_f64m1(float64_t *rs1, vfloat64m1_t vs3, size_t vl) { + return __riscv_vse64_v_f64m1(rs1, vs3, vl); } -void test_vse64_v_f64m2(float64_t *base, vfloat64m2_t value, size_t vl) { - return __riscv_vse64_v_f64m2(base, value, vl); +void test_vse64_v_f64m2(float64_t *rs1, vfloat64m2_t vs3, size_t vl) { + return __riscv_vse64_v_f64m2(rs1, vs3, vl); } -void test_vse64_v_f64m4(float64_t 
*base, vfloat64m4_t value, size_t vl) { - return __riscv_vse64_v_f64m4(base, value, vl); +void test_vse64_v_f64m4(float64_t *rs1, vfloat64m4_t vs3, size_t vl) { + return __riscv_vse64_v_f64m4(rs1, vs3, vl); } -void test_vse64_v_f64m8(float64_t *base, vfloat64m8_t value, size_t vl) { - return __riscv_vse64_v_f64m8(base, value, vl); +void test_vse64_v_f64m8(float64_t *rs1, vfloat64m8_t vs3, size_t vl) { + return __riscv_vse64_v_f64m8(rs1, vs3, vl); } -void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { - return __riscv_vse64_v_i64m1(base, value, vl); +void test_vse64_v_i64m1(int64_t *rs1, vint64m1_t vs3, size_t vl) { + return __riscv_vse64_v_i64m1(rs1, vs3, vl); } -void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { - return __riscv_vse64_v_i64m2(base, value, vl); +void test_vse64_v_i64m2(int64_t *rs1, vint64m2_t vs3, size_t vl) { + return __riscv_vse64_v_i64m2(rs1, vs3, vl); } -void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { - return __riscv_vse64_v_i64m4(base, value, vl); +void test_vse64_v_i64m4(int64_t *rs1, vint64m4_t vs3, size_t vl) { + return __riscv_vse64_v_i64m4(rs1, vs3, vl); } -void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { - return __riscv_vse64_v_i64m8(base, value, vl); +void test_vse64_v_i64m8(int64_t *rs1, vint64m8_t vs3, size_t vl) { + return __riscv_vse64_v_i64m8(rs1, vs3, vl); } -void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { - return __riscv_vse64_v_u64m1(base, value, vl); +void test_vse64_v_u64m1(uint64_t *rs1, vuint64m1_t vs3, size_t vl) { + return __riscv_vse64_v_u64m1(rs1, vs3, vl); } -void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { - return __riscv_vse64_v_u64m2(base, value, vl); +void test_vse64_v_u64m2(uint64_t *rs1, vuint64m2_t vs3, size_t vl) { + return __riscv_vse64_v_u64m2(rs1, vs3, vl); } -void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { - return __riscv_vse64_v_u64m4(base, value, vl); +void test_vse64_v_u64m4(uint64_t *rs1, vuint64m4_t vs3, size_t vl) { + return __riscv_vse64_v_u64m4(rs1, vs3, vl); } -void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { - return __riscv_vse64_v_u64m8(base, value, vl); +void test_vse64_v_u64m8(uint64_t *rs1, vuint64m8_t vs3, size_t vl) { + return __riscv_vse64_v_u64m8(rs1, vs3, vl); } -void test_vse64_v_f64m1_m(vbool64_t mask, float64_t *base, vfloat64m1_t value, size_t vl) { - return __riscv_vse64_v_f64m1_m(mask, base, value, vl); +void test_vse64_v_f64m1_m(vbool64_t vm, float64_t *rs1, vfloat64m1_t vs3, size_t vl) { + return __riscv_vse64_v_f64m1_m(vm, rs1, vs3, vl); } -void test_vse64_v_f64m2_m(vbool32_t mask, float64_t *base, vfloat64m2_t value, size_t vl) { - return __riscv_vse64_v_f64m2_m(mask, base, value, vl); +void test_vse64_v_f64m2_m(vbool32_t vm, float64_t *rs1, vfloat64m2_t vs3, size_t vl) { + return __riscv_vse64_v_f64m2_m(vm, rs1, vs3, vl); } -void test_vse64_v_f64m4_m(vbool16_t mask, float64_t *base, vfloat64m4_t value, size_t vl) { - return __riscv_vse64_v_f64m4_m(mask, base, value, vl); +void test_vse64_v_f64m4_m(vbool16_t vm, float64_t *rs1, vfloat64m4_t vs3, size_t vl) { + return __riscv_vse64_v_f64m4_m(vm, rs1, vs3, vl); } -void test_vse64_v_f64m8_m(vbool8_t mask, float64_t *base, vfloat64m8_t value, size_t vl) { - return __riscv_vse64_v_f64m8_m(mask, base, value, vl); +void test_vse64_v_f64m8_m(vbool8_t vm, float64_t *rs1, vfloat64m8_t vs3, size_t vl) { + return __riscv_vse64_v_f64m8_m(vm, rs1, vs3, vl); } -void 
test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { - return __riscv_vse64_v_i64m1_m(mask, base, value, vl); +void test_vse64_v_i64m1_m(vbool64_t vm, int64_t *rs1, vint64m1_t vs3, size_t vl) { + return __riscv_vse64_v_i64m1_m(vm, rs1, vs3, vl); } -void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { - return __riscv_vse64_v_i64m2_m(mask, base, value, vl); +void test_vse64_v_i64m2_m(vbool32_t vm, int64_t *rs1, vint64m2_t vs3, size_t vl) { + return __riscv_vse64_v_i64m2_m(vm, rs1, vs3, vl); } -void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { - return __riscv_vse64_v_i64m4_m(mask, base, value, vl); +void test_vse64_v_i64m4_m(vbool16_t vm, int64_t *rs1, vint64m4_t vs3, size_t vl) { + return __riscv_vse64_v_i64m4_m(vm, rs1, vs3, vl); } -void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { - return __riscv_vse64_v_i64m8_m(mask, base, value, vl); +void test_vse64_v_i64m8_m(vbool8_t vm, int64_t *rs1, vint64m8_t vs3, size_t vl) { + return __riscv_vse64_v_i64m8_m(vm, rs1, vs3, vl); } -void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) { - return __riscv_vse64_v_u64m1_m(mask, base, value, vl); +void test_vse64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs3, size_t vl) { + return __riscv_vse64_v_u64m1_m(vm, rs1, vs3, vl); } -void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) { - return __riscv_vse64_v_u64m2_m(mask, base, value, vl); +void test_vse64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs3, size_t vl) { + return __riscv_vse64_v_u64m2_m(vm, rs1, vs3, vl); } -void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) { - return __riscv_vse64_v_u64m4_m(mask, base, value, vl); +void test_vse64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t vs3, size_t vl) { + return __riscv_vse64_v_u64m4_m(vm, rs1, vs3, vl); } -void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) { - return __riscv_vse64_v_u64m8_m(mask, base, value, vl); +void test_vse64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint64m8_t vs3, size_t vl) { + return __riscv_vse64_v_u64m8_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse64\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vse8.c b/auto-generated/gnu-api-tests/vse8.c index 15e4fd758..8bcb1d9c1 100644 --- a/auto-generated/gnu-api-tests/vse8.c +++ b/auto-generated/gnu-api-tests/vse8.c @@ -6,116 +6,116 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) { - return __riscv_vse8_v_i8mf8(base, value, vl); +void test_vse8_v_i8mf8(int8_t *rs1, vint8mf8_t vs3, size_t vl) { + return __riscv_vse8_v_i8mf8(rs1, vs3, vl); } -void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) { - return __riscv_vse8_v_i8mf4(base, value, vl); +void test_vse8_v_i8mf4(int8_t *rs1, vint8mf4_t vs3, size_t vl) { + return __riscv_vse8_v_i8mf4(rs1, vs3, vl); } -void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { - return __riscv_vse8_v_i8mf2(base, value, vl); +void test_vse8_v_i8mf2(int8_t *rs1, vint8mf2_t vs3, size_t vl) { + return __riscv_vse8_v_i8mf2(rs1, vs3, vl); } -void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { - return __riscv_vse8_v_i8m1(base, value, vl); +void 
test_vse8_v_i8m1(int8_t *rs1, vint8m1_t vs3, size_t vl) { + return __riscv_vse8_v_i8m1(rs1, vs3, vl); } -void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { - return __riscv_vse8_v_i8m2(base, value, vl); +void test_vse8_v_i8m2(int8_t *rs1, vint8m2_t vs3, size_t vl) { + return __riscv_vse8_v_i8m2(rs1, vs3, vl); } -void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { - return __riscv_vse8_v_i8m4(base, value, vl); +void test_vse8_v_i8m4(int8_t *rs1, vint8m4_t vs3, size_t vl) { + return __riscv_vse8_v_i8m4(rs1, vs3, vl); } -void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { - return __riscv_vse8_v_i8m8(base, value, vl); +void test_vse8_v_i8m8(int8_t *rs1, vint8m8_t vs3, size_t vl) { + return __riscv_vse8_v_i8m8(rs1, vs3, vl); } -void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { - return __riscv_vse8_v_u8mf8(base, value, vl); +void test_vse8_v_u8mf8(uint8_t *rs1, vuint8mf8_t vs3, size_t vl) { + return __riscv_vse8_v_u8mf8(rs1, vs3, vl); } -void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { - return __riscv_vse8_v_u8mf4(base, value, vl); +void test_vse8_v_u8mf4(uint8_t *rs1, vuint8mf4_t vs3, size_t vl) { + return __riscv_vse8_v_u8mf4(rs1, vs3, vl); } -void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { - return __riscv_vse8_v_u8mf2(base, value, vl); +void test_vse8_v_u8mf2(uint8_t *rs1, vuint8mf2_t vs3, size_t vl) { + return __riscv_vse8_v_u8mf2(rs1, vs3, vl); } -void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { - return __riscv_vse8_v_u8m1(base, value, vl); +void test_vse8_v_u8m1(uint8_t *rs1, vuint8m1_t vs3, size_t vl) { + return __riscv_vse8_v_u8m1(rs1, vs3, vl); } -void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { - return __riscv_vse8_v_u8m2(base, value, vl); +void test_vse8_v_u8m2(uint8_t *rs1, vuint8m2_t vs3, size_t vl) { + return __riscv_vse8_v_u8m2(rs1, vs3, vl); } -void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { - return __riscv_vse8_v_u8m4(base, value, vl); +void test_vse8_v_u8m4(uint8_t *rs1, vuint8m4_t vs3, size_t vl) { + return __riscv_vse8_v_u8m4(rs1, vs3, vl); } -void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { - return __riscv_vse8_v_u8m8(base, value, vl); +void test_vse8_v_u8m8(uint8_t *rs1, vuint8m8_t vs3, size_t vl) { + return __riscv_vse8_v_u8m8(rs1, vs3, vl); } -void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { - return __riscv_vse8_v_i8mf8_m(mask, base, value, vl); +void test_vse8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vint8mf8_t vs3, size_t vl) { + return __riscv_vse8_v_i8mf8_m(vm, rs1, vs3, vl); } -void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { - return __riscv_vse8_v_i8mf4_m(mask, base, value, vl); +void test_vse8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vint8mf4_t vs3, size_t vl) { + return __riscv_vse8_v_i8mf4_m(vm, rs1, vs3, vl); } -void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { - return __riscv_vse8_v_i8mf2_m(mask, base, value, vl); +void test_vse8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vint8mf2_t vs3, size_t vl) { + return __riscv_vse8_v_i8mf2_m(vm, rs1, vs3, vl); } -void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { - return __riscv_vse8_v_i8m1_m(mask, base, value, vl); +void test_vse8_v_i8m1_m(vbool8_t vm, int8_t *rs1, vint8m1_t vs3, size_t vl) { + return __riscv_vse8_v_i8m1_m(vm, rs1, vs3, vl); } -void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, 
vint8m2_t value, size_t vl) { - return __riscv_vse8_v_i8m2_m(mask, base, value, vl); +void test_vse8_v_i8m2_m(vbool4_t vm, int8_t *rs1, vint8m2_t vs3, size_t vl) { + return __riscv_vse8_v_i8m2_m(vm, rs1, vs3, vl); } -void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { - return __riscv_vse8_v_i8m4_m(mask, base, value, vl); +void test_vse8_v_i8m4_m(vbool2_t vm, int8_t *rs1, vint8m4_t vs3, size_t vl) { + return __riscv_vse8_v_i8m4_m(vm, rs1, vs3, vl); } -void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { - return __riscv_vse8_v_i8m8_m(mask, base, value, vl); +void test_vse8_v_i8m8_m(vbool1_t vm, int8_t *rs1, vint8m8_t vs3, size_t vl) { + return __riscv_vse8_v_i8m8_m(vm, rs1, vs3, vl); } -void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { - return __riscv_vse8_v_u8mf8_m(mask, base, value, vl); +void test_vse8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs3, size_t vl) { + return __riscv_vse8_v_u8mf8_m(vm, rs1, vs3, vl); } -void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { - return __riscv_vse8_v_u8mf4_m(mask, base, value, vl); +void test_vse8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs3, size_t vl) { + return __riscv_vse8_v_u8mf4_m(vm, rs1, vs3, vl); } -void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { - return __riscv_vse8_v_u8mf2_m(mask, base, value, vl); +void test_vse8_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs3, size_t vl) { + return __riscv_vse8_v_u8mf2_m(vm, rs1, vs3, vl); } -void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { - return __riscv_vse8_v_u8m1_m(mask, base, value, vl); +void test_vse8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs3, size_t vl) { + return __riscv_vse8_v_u8m1_m(vm, rs1, vs3, vl); } -void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { - return __riscv_vse8_v_u8m2_m(mask, base, value, vl); +void test_vse8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs3, size_t vl) { + return __riscv_vse8_v_u8m2_m(vm, rs1, vs3, vl); } -void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { - return __riscv_vse8_v_u8m4_m(mask, base, value, vl); +void test_vse8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t vs3, size_t vl) { + return __riscv_vse8_v_u8m4_m(vm, rs1, vs3, vl); } -void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { - return __riscv_vse8_v_u8m8_m(mask, base, value, vl); +void test_vse8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, vuint8m8_t vs3, size_t vl) { + return __riscv_vse8_v_u8m8_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse8\.[ivxfswum.]+\s+} 28 } } */ diff --git a/auto-generated/gnu-api-tests/vset.c b/auto-generated/gnu-api-tests/vset.c index 6afcfb9dd..6dbbb9f1e 100644 --- a/auto-generated/gnu-api-tests/vset.c +++ b/auto-generated/gnu-api-tests/vset.c @@ -6,1172 +6,1172 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset_v_f16m1_f16m2(dest, 0, val); +vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset_v_f16m1_f16m2(dest, 0, value); } -vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1_t val) { - return 
__riscv_vset_v_f16m1_f16m4(dest, 0, val);
+vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m4(dest, 0, value);
 }
-vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2_t val) {
-  return __riscv_vset_v_f16m2_f16m4(dest, 0, val);
+vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2_t value) {
+  return __riscv_vset_v_f16m2_f16m4(dest, 0, value);
 }
-vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m8(dest, 0, val);
+vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m8(dest, 0, value);
 }
-vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2_t val) {
-  return __riscv_vset_v_f16m2_f16m8(dest, 0, val);
+vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2_t value) {
+  return __riscv_vset_v_f16m2_f16m8(dest, 0, value);
 }
-vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4_t val) {
-  return __riscv_vset_v_f16m4_f16m8(dest, 0, val);
+vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4_t value) {
+  return __riscv_vset_v_f16m4_f16m8(dest, 0, value);
 }
-vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m2(dest, 0, val);
+vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m2(dest, 0, value);
 }
-vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m4(dest, 0, val);
+vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m4(dest, 0, value);
 }
-vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) {
-  return __riscv_vset_v_f32m2_f32m4(dest, 0, val);
+vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t value) {
+  return __riscv_vset_v_f32m2_f32m4(dest, 0, value);
 }
-vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m8(dest, 0, val);
+vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m8(dest, 0, value);
 }
-vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) {
-  return __riscv_vset_v_f32m2_f32m8(dest, 0, val);
+vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t value) {
+  return __riscv_vset_v_f32m2_f32m8(dest, 0, value);
 }
-vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) {
-  return __riscv_vset_v_f32m4_f32m8(dest, 0, val);
+vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t value) {
+  return __riscv_vset_v_f32m4_f32m8(dest, 0, value);
 }
-vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m2(dest, 0, val);
+vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m2(dest, 0, value);
 }
-vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m4(dest, 0, val);
+vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m4(dest, 0, value);
 }
-vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) {
-  return __riscv_vset_v_f64m2_f64m4(dest, 0, val);
+vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t value) {
+  return __riscv_vset_v_f64m2_f64m4(dest, 0, value);
 }
-vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m8(dest, 0, val);
+vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m8(dest, 0, value);
 }
-vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) {
-  return __riscv_vset_v_f64m2_f64m8(dest, 0, val);
+vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t value) {
+  return __riscv_vset_v_f64m2_f64m8(dest, 0, value);
 }
-vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) {
-  return __riscv_vset_v_f64m4_f64m8(dest, 0, val);
+vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t value) {
+  return __riscv_vset_v_f64m4_f64m8(dest, 0, value);
 }
-vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m2(dest, 0, val);
+vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m2(dest, 0, value);
 }
-vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m4(dest, 0, val);
+vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m4(dest, 0, value);
 }
-vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset_v_i8m2_i8m4(dest, 0, val);
+vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset_v_i8m2_i8m4(dest, 0, value);
 }
-vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m8(dest, 0, val);
+vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m8(dest, 0, value);
 }
-vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset_v_i8m2_i8m8(dest, 0, val);
+vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset_v_i8m2_i8m8(dest, 0, value);
 }
-vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
-  return __riscv_vset_v_i8m4_i8m8(dest, 0, val);
+vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t value) {
+  return __riscv_vset_v_i8m4_i8m8(dest, 0, value);
 }
-vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m2(dest, 0, val);
+vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m2(dest, 0, value);
 }
-vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m4(dest, 0, val);
+vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m4(dest, 0, value);
 }
-vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset_v_i16m2_i16m4(dest, 0, val);
+vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset_v_i16m2_i16m4(dest, 0, value);
 }
-vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m8(dest, 0, val);
+vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m8(dest, 0, value);
 }
-vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset_v_i16m2_i16m8(dest, 0, val);
+vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset_v_i16m2_i16m8(dest, 0, value);
 }
-vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) {
-  return __riscv_vset_v_i16m4_i16m8(dest, 0, val);
+vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t value) {
+  return __riscv_vset_v_i16m4_i16m8(dest, 0, value);
 }
-vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m2(dest, 0, val);
+vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m2(dest, 0, value);
 }
-vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m4(dest, 0, val);
+vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m4(dest, 0, value);
 }
-vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset_v_i32m2_i32m4(dest, 0, val);
+vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset_v_i32m2_i32m4(dest, 0, value);
 }
-vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m8(dest, 0, val);
+vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m8(dest, 0, value);
 }
-vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset_v_i32m2_i32m8(dest, 0, val);
+vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset_v_i32m2_i32m8(dest, 0, value);
 }
-vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) {
-  return __riscv_vset_v_i32m4_i32m8(dest, 0, val);
+vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t value) {
+  return __riscv_vset_v_i32m4_i32m8(dest, 0, value);
 }
-vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m2(dest, 0, val);
+vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m2(dest, 0, value);
 }
-vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m4(dest, 0, val);
+vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m4(dest, 0, value);
 }
-vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset_v_i64m2_i64m4(dest, 0, val);
+vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset_v_i64m2_i64m4(dest, 0, value);
 }
-vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m8(dest, 0, val);
+vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m8(dest, 0, value);
 }
-vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset_v_i64m2_i64m8(dest, 0, val);
+vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset_v_i64m2_i64m8(dest, 0, value);
 }
-vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) {
-  return __riscv_vset_v_i64m4_i64m8(dest, 0, val);
+vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t value) {
+  return __riscv_vset_v_i64m4_i64m8(dest, 0, value);
 }
-vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m2(dest, 0, val);
+vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m2(dest, 0, value);
 }
-vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m4(dest, 0, val);
+vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m4(dest, 0, value);
 }
-vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset_v_u8m2_u8m4(dest, 0, val);
+vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset_v_u8m2_u8m4(dest, 0, value);
 }
-vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m8(dest, 0, val);
+vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m8(dest, 0, value);
 }
-vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset_v_u8m2_u8m8(dest, 0, val);
+vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset_v_u8m2_u8m8(dest, 0, value);
 }
-vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) {
-  return __riscv_vset_v_u8m4_u8m8(dest, 0, val);
+vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t value) {
+  return __riscv_vset_v_u8m4_u8m8(dest, 0, value);
 }
-vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset_v_u16m1_u16m2(dest, 0, val);
+vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset_v_u16m1_u16m2(dest, 0, value);
 }
-vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset_v_u16m1_u16m4(dest, 0, val);
+vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset_v_u16m1_u16m4(dest, 0, value);
 }
-vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) {
-  return __riscv_vset_v_u16m2_u16m4(dest, 0, val);
+vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t value) {
+  return __riscv_vset_v_u16m2_u16m4(dest, 0, value);
 }
-vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset_v_u16m1_u16m8(dest, 0, val);
+vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset_v_u16m1_u16m8(dest, 0, value);
 }
-vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) {
-  return __riscv_vset_v_u16m2_u16m8(dest, 0, val);
+vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t value) {
+  return __riscv_vset_v_u16m2_u16m8(dest, 0, value);
 }
-vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) {
-  return __riscv_vset_v_u16m4_u16m8(dest, 0, val);
+vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t value) {
+  return __riscv_vset_v_u16m4_u16m8(dest, 0, value);
 }
-vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset_v_u32m1_u32m2(dest, 0, val);
+vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset_v_u32m1_u32m2(dest, 0, value);
 }
-vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset_v_u32m1_u32m4(dest, 0, val);
+vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset_v_u32m1_u32m4(dest, 0, value);
 }
-vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) {
-  return __riscv_vset_v_u32m2_u32m4(dest, 0, val);
+vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t value) {
+  return __riscv_vset_v_u32m2_u32m4(dest, 0, value);
 }
-vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset_v_u32m1_u32m8(dest, 0, val);
+vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset_v_u32m1_u32m8(dest, 0, value);
 }
-vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) {
-  return __riscv_vset_v_u32m2_u32m8(dest, 0, val);
+vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t value) {
+  return __riscv_vset_v_u32m2_u32m8(dest, 0, value);
 }
-vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) {
-  return __riscv_vset_v_u32m4_u32m8(dest, 0, val);
+vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t value) {
+  return __riscv_vset_v_u32m4_u32m8(dest, 0, value);
 }
-vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset_v_u64m1_u64m2(dest, 0, val);
+vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset_v_u64m1_u64m2(dest, 0, value);
 }
-vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset_v_u64m1_u64m4(dest, 0, val);
+vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset_v_u64m1_u64m4(dest, 0, value);
 }
-vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) {
-  return __riscv_vset_v_u64m2_u64m4(dest, 0, val);
+vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t value) {
+  return __riscv_vset_v_u64m2_u64m4(dest, 0, value);
 }
-vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset_v_u64m1_u64m8(dest, 0, val);
+vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset_v_u64m1_u64m8(dest, 0, value);
 }
-vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) {
-  return __riscv_vset_v_u64m2_u64m8(dest, 0, val);
+vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t value) {
+  return __riscv_vset_v_u64m2_u64m8(dest, 0, value);
 }
-vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) {
-  return __riscv_vset_v_u64m4_u64m8(dest, 0, val);
+vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t value) {
+  return __riscv_vset_v_u64m4_u64m8(dest, 0, value);
 }
-vfloat16mf4x2_t test_vset_v_f16mf4_f16mf4x2(vfloat16mf4x2_t dest, size_t index, vfloat16mf4_t val) {
-  return __riscv_vset_v_f16mf4_f16mf4x2(dest, 0, val);
+vfloat16mf4x2_t test_vset_v_f16mf4_f16mf4x2(vfloat16mf4x2_t dest, size_t index, vfloat16mf4_t value) {
+  return __riscv_vset_v_f16mf4_f16mf4x2(dest, 0, value);
 }
-vfloat16mf4x3_t test_vset_v_f16mf4_f16mf4x3(vfloat16mf4x3_t dest, size_t index, vfloat16mf4_t val) {
-  return __riscv_vset_v_f16mf4_f16mf4x3(dest, 0, val);
+vfloat16mf4x3_t test_vset_v_f16mf4_f16mf4x3(vfloat16mf4x3_t dest, size_t index, vfloat16mf4_t value) {
+  return __riscv_vset_v_f16mf4_f16mf4x3(dest, 0, value);
 }
-vfloat16mf4x4_t test_vset_v_f16mf4_f16mf4x4(vfloat16mf4x4_t dest, size_t index, vfloat16mf4_t val) {
-  return __riscv_vset_v_f16mf4_f16mf4x4(dest, 0, val);
+vfloat16mf4x4_t test_vset_v_f16mf4_f16mf4x4(vfloat16mf4x4_t dest, size_t index, vfloat16mf4_t value) {
+  return __riscv_vset_v_f16mf4_f16mf4x4(dest, 0, value);
 }
-vfloat16mf4x5_t test_vset_v_f16mf4_f16mf4x5(vfloat16mf4x5_t dest, size_t index, vfloat16mf4_t val) {
-  return __riscv_vset_v_f16mf4_f16mf4x5(dest, 0, val);
+vfloat16mf4x5_t test_vset_v_f16mf4_f16mf4x5(vfloat16mf4x5_t dest, size_t index, vfloat16mf4_t value) {
+  return __riscv_vset_v_f16mf4_f16mf4x5(dest, 0, value);
 }
-vfloat16mf4x6_t test_vset_v_f16mf4_f16mf4x6(vfloat16mf4x6_t dest, size_t index, vfloat16mf4_t val) {
-  return __riscv_vset_v_f16mf4_f16mf4x6(dest, 0, val);
+vfloat16mf4x6_t test_vset_v_f16mf4_f16mf4x6(vfloat16mf4x6_t dest, size_t index, vfloat16mf4_t value) {
+  return __riscv_vset_v_f16mf4_f16mf4x6(dest, 0, value);
 }
-vfloat16mf4x7_t test_vset_v_f16mf4_f16mf4x7(vfloat16mf4x7_t dest, size_t index, vfloat16mf4_t val) {
-  return __riscv_vset_v_f16mf4_f16mf4x7(dest, 0, val);
+vfloat16mf4x7_t test_vset_v_f16mf4_f16mf4x7(vfloat16mf4x7_t dest, size_t index, vfloat16mf4_t value) {
+  return __riscv_vset_v_f16mf4_f16mf4x7(dest, 0, value);
 }
-vfloat16mf4x8_t test_vset_v_f16mf4_f16mf4x8(vfloat16mf4x8_t dest, size_t index, vfloat16mf4_t val) {
-  return __riscv_vset_v_f16mf4_f16mf4x8(dest, 0, val);
+vfloat16mf4x8_t test_vset_v_f16mf4_f16mf4x8(vfloat16mf4x8_t dest, size_t index, vfloat16mf4_t value) {
+  return __riscv_vset_v_f16mf4_f16mf4x8(dest, 0, value);
 }
-vfloat16mf2x2_t test_vset_v_f16mf2_f16mf2x2(vfloat16mf2x2_t dest, size_t index, vfloat16mf2_t val) {
-  return __riscv_vset_v_f16mf2_f16mf2x2(dest, 0, val);
+vfloat16mf2x2_t test_vset_v_f16mf2_f16mf2x2(vfloat16mf2x2_t dest, size_t index, vfloat16mf2_t value) {
+  return __riscv_vset_v_f16mf2_f16mf2x2(dest, 0, value);
 }
-vfloat16mf2x3_t test_vset_v_f16mf2_f16mf2x3(vfloat16mf2x3_t dest, size_t index, vfloat16mf2_t val) {
-  return __riscv_vset_v_f16mf2_f16mf2x3(dest, 0, val);
+vfloat16mf2x3_t test_vset_v_f16mf2_f16mf2x3(vfloat16mf2x3_t dest, size_t index, vfloat16mf2_t value) {
+  return __riscv_vset_v_f16mf2_f16mf2x3(dest, 0, value);
 }
-vfloat16mf2x4_t test_vset_v_f16mf2_f16mf2x4(vfloat16mf2x4_t dest, size_t index, vfloat16mf2_t val) {
-  return __riscv_vset_v_f16mf2_f16mf2x4(dest, 0, val);
+vfloat16mf2x4_t test_vset_v_f16mf2_f16mf2x4(vfloat16mf2x4_t dest, size_t index, vfloat16mf2_t value) {
+  return __riscv_vset_v_f16mf2_f16mf2x4(dest, 0, value);
 }
-vfloat16mf2x5_t test_vset_v_f16mf2_f16mf2x5(vfloat16mf2x5_t dest, size_t index, vfloat16mf2_t val) {
-  return __riscv_vset_v_f16mf2_f16mf2x5(dest, 0, val);
+vfloat16mf2x5_t test_vset_v_f16mf2_f16mf2x5(vfloat16mf2x5_t dest, size_t index, vfloat16mf2_t value) {
+  return __riscv_vset_v_f16mf2_f16mf2x5(dest, 0, value);
 }
-vfloat16mf2x6_t test_vset_v_f16mf2_f16mf2x6(vfloat16mf2x6_t dest, size_t index, vfloat16mf2_t val) {
-  return __riscv_vset_v_f16mf2_f16mf2x6(dest, 0, val);
+vfloat16mf2x6_t test_vset_v_f16mf2_f16mf2x6(vfloat16mf2x6_t dest, size_t index, vfloat16mf2_t value) {
+  return __riscv_vset_v_f16mf2_f16mf2x6(dest, 0, value);
 }
-vfloat16mf2x7_t test_vset_v_f16mf2_f16mf2x7(vfloat16mf2x7_t dest, size_t index, vfloat16mf2_t val) {
-  return __riscv_vset_v_f16mf2_f16mf2x7(dest, 0, val);
+vfloat16mf2x7_t test_vset_v_f16mf2_f16mf2x7(vfloat16mf2x7_t dest, size_t index, vfloat16mf2_t value) {
+  return __riscv_vset_v_f16mf2_f16mf2x7(dest, 0, value);
 }
-vfloat16mf2x8_t test_vset_v_f16mf2_f16mf2x8(vfloat16mf2x8_t dest, size_t index, vfloat16mf2_t val) {
-  return __riscv_vset_v_f16mf2_f16mf2x8(dest, 0, val);
+vfloat16mf2x8_t test_vset_v_f16mf2_f16mf2x8(vfloat16mf2x8_t dest, size_t index, vfloat16mf2_t value) {
+  return __riscv_vset_v_f16mf2_f16mf2x8(dest, 0, value);
 }
-vfloat16m1x2_t test_vset_v_f16m1_f16m1x2(vfloat16m1x2_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m1x2(dest, 0, val);
+vfloat16m1x2_t test_vset_v_f16m1_f16m1x2(vfloat16m1x2_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m1x2(dest, 0, value);
 }
-vfloat16m1x3_t test_vset_v_f16m1_f16m1x3(vfloat16m1x3_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m1x3(dest, 0, val);
+vfloat16m1x3_t test_vset_v_f16m1_f16m1x3(vfloat16m1x3_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m1x3(dest, 0, value);
 }
-vfloat16m1x4_t test_vset_v_f16m1_f16m1x4(vfloat16m1x4_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m1x4(dest, 0, val);
+vfloat16m1x4_t test_vset_v_f16m1_f16m1x4(vfloat16m1x4_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m1x4(dest, 0, value);
 }
-vfloat16m1x5_t test_vset_v_f16m1_f16m1x5(vfloat16m1x5_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m1x5(dest, 0, val);
+vfloat16m1x5_t test_vset_v_f16m1_f16m1x5(vfloat16m1x5_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m1x5(dest, 0, value);
 }
-vfloat16m1x6_t test_vset_v_f16m1_f16m1x6(vfloat16m1x6_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m1x6(dest, 0, val);
+vfloat16m1x6_t test_vset_v_f16m1_f16m1x6(vfloat16m1x6_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m1x6(dest, 0, value);
 }
-vfloat16m1x7_t test_vset_v_f16m1_f16m1x7(vfloat16m1x7_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m1x7(dest, 0, val);
+vfloat16m1x7_t test_vset_v_f16m1_f16m1x7(vfloat16m1x7_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m1x7(dest, 0, value);
 }
-vfloat16m1x8_t test_vset_v_f16m1_f16m1x8(vfloat16m1x8_t dest, size_t index, vfloat16m1_t val) {
-  return __riscv_vset_v_f16m1_f16m1x8(dest, 0, val);
+vfloat16m1x8_t test_vset_v_f16m1_f16m1x8(vfloat16m1x8_t dest, size_t index, vfloat16m1_t value) {
+  return __riscv_vset_v_f16m1_f16m1x8(dest, 0, value);
 }
-vfloat16m2x2_t test_vset_v_f16m2_f16m2x2(vfloat16m2x2_t dest, size_t index, vfloat16m2_t val) {
-  return __riscv_vset_v_f16m2_f16m2x2(dest, 0, val);
+vfloat16m2x2_t test_vset_v_f16m2_f16m2x2(vfloat16m2x2_t dest, size_t index, vfloat16m2_t value) {
+  return __riscv_vset_v_f16m2_f16m2x2(dest, 0, value);
 }
-vfloat16m2x3_t test_vset_v_f16m2_f16m2x3(vfloat16m2x3_t dest, size_t index, vfloat16m2_t val) {
-  return __riscv_vset_v_f16m2_f16m2x3(dest, 0, val);
+vfloat16m2x3_t test_vset_v_f16m2_f16m2x3(vfloat16m2x3_t dest, size_t index, vfloat16m2_t value) {
+  return __riscv_vset_v_f16m2_f16m2x3(dest, 0, value);
 }
-vfloat16m2x4_t test_vset_v_f16m2_f16m2x4(vfloat16m2x4_t dest, size_t index, vfloat16m2_t val) {
-  return __riscv_vset_v_f16m2_f16m2x4(dest, 0, val);
+vfloat16m2x4_t test_vset_v_f16m2_f16m2x4(vfloat16m2x4_t dest, size_t index, vfloat16m2_t value) {
+  return __riscv_vset_v_f16m2_f16m2x4(dest, 0, value);
 }
-vfloat16m4x2_t test_vset_v_f16m4_f16m4x2(vfloat16m4x2_t dest, size_t index, vfloat16m4_t val) {
-  return __riscv_vset_v_f16m4_f16m4x2(dest, 0, val);
+vfloat16m4x2_t test_vset_v_f16m4_f16m4x2(vfloat16m4x2_t dest, size_t index, vfloat16m4_t value) {
+  return __riscv_vset_v_f16m4_f16m4x2(dest, 0, value);
 }
-vfloat32mf2x2_t test_vset_v_f32mf2_f32mf2x2(vfloat32mf2x2_t dest, size_t index, vfloat32mf2_t val) {
-  return __riscv_vset_v_f32mf2_f32mf2x2(dest, 0, val);
+vfloat32mf2x2_t test_vset_v_f32mf2_f32mf2x2(vfloat32mf2x2_t dest, size_t index, vfloat32mf2_t value) {
+  return __riscv_vset_v_f32mf2_f32mf2x2(dest, 0, value);
 }
-vfloat32mf2x3_t test_vset_v_f32mf2_f32mf2x3(vfloat32mf2x3_t dest, size_t index, vfloat32mf2_t val) {
-  return __riscv_vset_v_f32mf2_f32mf2x3(dest, 0, val);
+vfloat32mf2x3_t test_vset_v_f32mf2_f32mf2x3(vfloat32mf2x3_t dest, size_t index, vfloat32mf2_t value) {
+  return __riscv_vset_v_f32mf2_f32mf2x3(dest, 0, value);
 }
-vfloat32mf2x4_t test_vset_v_f32mf2_f32mf2x4(vfloat32mf2x4_t dest, size_t index, vfloat32mf2_t val) {
-  return __riscv_vset_v_f32mf2_f32mf2x4(dest, 0, val);
+vfloat32mf2x4_t test_vset_v_f32mf2_f32mf2x4(vfloat32mf2x4_t dest, size_t index, vfloat32mf2_t value) {
+  return __riscv_vset_v_f32mf2_f32mf2x4(dest, 0, value);
 }
-vfloat32mf2x5_t test_vset_v_f32mf2_f32mf2x5(vfloat32mf2x5_t dest, size_t index, vfloat32mf2_t val) {
-  return __riscv_vset_v_f32mf2_f32mf2x5(dest, 0, val);
+vfloat32mf2x5_t test_vset_v_f32mf2_f32mf2x5(vfloat32mf2x5_t dest, size_t index, vfloat32mf2_t value) {
+  return __riscv_vset_v_f32mf2_f32mf2x5(dest, 0, value);
 }
-vfloat32mf2x6_t test_vset_v_f32mf2_f32mf2x6(vfloat32mf2x6_t dest, size_t index, vfloat32mf2_t val) {
-  return __riscv_vset_v_f32mf2_f32mf2x6(dest, 0, val);
+vfloat32mf2x6_t test_vset_v_f32mf2_f32mf2x6(vfloat32mf2x6_t dest, size_t index, vfloat32mf2_t value) {
+  return __riscv_vset_v_f32mf2_f32mf2x6(dest, 0, value);
 }
-vfloat32mf2x7_t test_vset_v_f32mf2_f32mf2x7(vfloat32mf2x7_t dest, size_t index, vfloat32mf2_t val) {
-  return __riscv_vset_v_f32mf2_f32mf2x7(dest, 0, val);
+vfloat32mf2x7_t test_vset_v_f32mf2_f32mf2x7(vfloat32mf2x7_t dest, size_t index, vfloat32mf2_t value) {
+  return __riscv_vset_v_f32mf2_f32mf2x7(dest, 0, value);
 }
-vfloat32mf2x8_t test_vset_v_f32mf2_f32mf2x8(vfloat32mf2x8_t dest, size_t index, vfloat32mf2_t val) {
-  return __riscv_vset_v_f32mf2_f32mf2x8(dest, 0, val);
+vfloat32mf2x8_t test_vset_v_f32mf2_f32mf2x8(vfloat32mf2x8_t dest, size_t index, vfloat32mf2_t value) {
+  return __riscv_vset_v_f32mf2_f32mf2x8(dest, 0, value);
 }
-vfloat32m1x2_t test_vset_v_f32m1_f32m1x2(vfloat32m1x2_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m1x2(dest, 0, val);
+vfloat32m1x2_t test_vset_v_f32m1_f32m1x2(vfloat32m1x2_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m1x2(dest, 0, value);
 }
-vfloat32m1x3_t test_vset_v_f32m1_f32m1x3(vfloat32m1x3_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m1x3(dest, 0, val);
+vfloat32m1x3_t test_vset_v_f32m1_f32m1x3(vfloat32m1x3_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m1x3(dest, 0, value);
 }
-vfloat32m1x4_t test_vset_v_f32m1_f32m1x4(vfloat32m1x4_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m1x4(dest, 0, val);
+vfloat32m1x4_t test_vset_v_f32m1_f32m1x4(vfloat32m1x4_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m1x4(dest, 0, value);
 }
-vfloat32m1x5_t test_vset_v_f32m1_f32m1x5(vfloat32m1x5_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m1x5(dest, 0, val);
+vfloat32m1x5_t test_vset_v_f32m1_f32m1x5(vfloat32m1x5_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m1x5(dest, 0, value);
 }
-vfloat32m1x6_t test_vset_v_f32m1_f32m1x6(vfloat32m1x6_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m1x6(dest, 0, val);
+vfloat32m1x6_t test_vset_v_f32m1_f32m1x6(vfloat32m1x6_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m1x6(dest, 0, value);
 }
-vfloat32m1x7_t test_vset_v_f32m1_f32m1x7(vfloat32m1x7_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m1x7(dest, 0, val);
+vfloat32m1x7_t test_vset_v_f32m1_f32m1x7(vfloat32m1x7_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m1x7(dest, 0, value);
 }
-vfloat32m1x8_t test_vset_v_f32m1_f32m1x8(vfloat32m1x8_t dest, size_t index, vfloat32m1_t val) {
-  return __riscv_vset_v_f32m1_f32m1x8(dest, 0, val);
+vfloat32m1x8_t test_vset_v_f32m1_f32m1x8(vfloat32m1x8_t dest, size_t index, vfloat32m1_t value) {
+  return __riscv_vset_v_f32m1_f32m1x8(dest, 0, value);
 }
-vfloat32m2x2_t test_vset_v_f32m2_f32m2x2(vfloat32m2x2_t dest, size_t index, vfloat32m2_t val) {
-  return __riscv_vset_v_f32m2_f32m2x2(dest, 0, val);
+vfloat32m2x2_t test_vset_v_f32m2_f32m2x2(vfloat32m2x2_t dest, size_t index, vfloat32m2_t value) {
+  return __riscv_vset_v_f32m2_f32m2x2(dest, 0, value);
 }
-vfloat32m2x3_t test_vset_v_f32m2_f32m2x3(vfloat32m2x3_t dest, size_t index, vfloat32m2_t val) {
-  return __riscv_vset_v_f32m2_f32m2x3(dest, 0, val);
+vfloat32m2x3_t test_vset_v_f32m2_f32m2x3(vfloat32m2x3_t dest, size_t index, vfloat32m2_t value) {
+  return __riscv_vset_v_f32m2_f32m2x3(dest, 0, value);
 }
-vfloat32m2x4_t test_vset_v_f32m2_f32m2x4(vfloat32m2x4_t dest, size_t index, vfloat32m2_t val) {
-  return __riscv_vset_v_f32m2_f32m2x4(dest, 0, val);
+vfloat32m2x4_t test_vset_v_f32m2_f32m2x4(vfloat32m2x4_t dest, size_t index, vfloat32m2_t value) {
+  return __riscv_vset_v_f32m2_f32m2x4(dest, 0, value);
 }
-vfloat32m4x2_t test_vset_v_f32m4_f32m4x2(vfloat32m4x2_t dest, size_t index, vfloat32m4_t val) {
-  return __riscv_vset_v_f32m4_f32m4x2(dest, 0, val);
+vfloat32m4x2_t test_vset_v_f32m4_f32m4x2(vfloat32m4x2_t dest, size_t index, vfloat32m4_t value) {
+  return __riscv_vset_v_f32m4_f32m4x2(dest, 0, value);
 }
-vfloat64m1x2_t test_vset_v_f64m1_f64m1x2(vfloat64m1x2_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m1x2(dest, 0, val);
+vfloat64m1x2_t test_vset_v_f64m1_f64m1x2(vfloat64m1x2_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m1x2(dest, 0, value);
 }
-vfloat64m1x3_t test_vset_v_f64m1_f64m1x3(vfloat64m1x3_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m1x3(dest, 0, val);
+vfloat64m1x3_t test_vset_v_f64m1_f64m1x3(vfloat64m1x3_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m1x3(dest, 0, value);
 }
-vfloat64m1x4_t test_vset_v_f64m1_f64m1x4(vfloat64m1x4_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m1x4(dest, 0, val);
+vfloat64m1x4_t test_vset_v_f64m1_f64m1x4(vfloat64m1x4_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m1x4(dest, 0, value);
 }
-vfloat64m1x5_t test_vset_v_f64m1_f64m1x5(vfloat64m1x5_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m1x5(dest, 0, val);
+vfloat64m1x5_t test_vset_v_f64m1_f64m1x5(vfloat64m1x5_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m1x5(dest, 0, value);
 }
-vfloat64m1x6_t test_vset_v_f64m1_f64m1x6(vfloat64m1x6_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m1x6(dest, 0, val);
+vfloat64m1x6_t test_vset_v_f64m1_f64m1x6(vfloat64m1x6_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m1x6(dest, 0, value);
 }
-vfloat64m1x7_t test_vset_v_f64m1_f64m1x7(vfloat64m1x7_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m1x7(dest, 0, val);
+vfloat64m1x7_t test_vset_v_f64m1_f64m1x7(vfloat64m1x7_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m1x7(dest, 0, value);
 }
-vfloat64m1x8_t test_vset_v_f64m1_f64m1x8(vfloat64m1x8_t dest, size_t index, vfloat64m1_t val) {
-  return __riscv_vset_v_f64m1_f64m1x8(dest, 0, val);
+vfloat64m1x8_t test_vset_v_f64m1_f64m1x8(vfloat64m1x8_t dest, size_t index, vfloat64m1_t value) {
+  return __riscv_vset_v_f64m1_f64m1x8(dest, 0, value);
 }
-vfloat64m2x2_t test_vset_v_f64m2_f64m2x2(vfloat64m2x2_t dest, size_t index, vfloat64m2_t val) {
-  return __riscv_vset_v_f64m2_f64m2x2(dest, 0, val);
+vfloat64m2x2_t test_vset_v_f64m2_f64m2x2(vfloat64m2x2_t dest, size_t index, vfloat64m2_t value) {
+  return __riscv_vset_v_f64m2_f64m2x2(dest, 0, value);
 }
-vfloat64m2x3_t test_vset_v_f64m2_f64m2x3(vfloat64m2x3_t dest, size_t index, vfloat64m2_t val) {
-  return __riscv_vset_v_f64m2_f64m2x3(dest, 0, val);
+vfloat64m2x3_t test_vset_v_f64m2_f64m2x3(vfloat64m2x3_t dest, size_t index, vfloat64m2_t value) {
+  return __riscv_vset_v_f64m2_f64m2x3(dest, 0, value);
 }
-vfloat64m2x4_t test_vset_v_f64m2_f64m2x4(vfloat64m2x4_t dest, size_t index, vfloat64m2_t val) {
-  return __riscv_vset_v_f64m2_f64m2x4(dest, 0, val);
+vfloat64m2x4_t test_vset_v_f64m2_f64m2x4(vfloat64m2x4_t dest, size_t index, vfloat64m2_t value) {
+  return __riscv_vset_v_f64m2_f64m2x4(dest, 0, value);
 }
-vfloat64m4x2_t test_vset_v_f64m4_f64m4x2(vfloat64m4x2_t dest, size_t index, vfloat64m4_t val) {
-  return __riscv_vset_v_f64m4_f64m4x2(dest, 0, val);
+vfloat64m4x2_t test_vset_v_f64m4_f64m4x2(vfloat64m4x2_t dest, size_t index, vfloat64m4_t value) {
+  return __riscv_vset_v_f64m4_f64m4x2(dest, 0, value);
 }
-vint8mf8x2_t test_vset_v_i8mf8_i8mf8x2(vint8mf8x2_t dest, size_t index, vint8mf8_t val) {
-  return __riscv_vset_v_i8mf8_i8mf8x2(dest, 0, val);
+vint8mf8x2_t test_vset_v_i8mf8_i8mf8x2(vint8mf8x2_t dest, size_t index, vint8mf8_t value) {
+  return __riscv_vset_v_i8mf8_i8mf8x2(dest, 0, value);
 }
-vint8mf8x3_t test_vset_v_i8mf8_i8mf8x3(vint8mf8x3_t dest, size_t index, vint8mf8_t val) {
-  return __riscv_vset_v_i8mf8_i8mf8x3(dest, 0, val);
+vint8mf8x3_t test_vset_v_i8mf8_i8mf8x3(vint8mf8x3_t dest, size_t index, vint8mf8_t value) {
+  return __riscv_vset_v_i8mf8_i8mf8x3(dest, 0, value);
 }
-vint8mf8x4_t test_vset_v_i8mf8_i8mf8x4(vint8mf8x4_t dest, size_t index, vint8mf8_t val) {
-  return __riscv_vset_v_i8mf8_i8mf8x4(dest, 0, val);
+vint8mf8x4_t test_vset_v_i8mf8_i8mf8x4(vint8mf8x4_t dest, size_t index, vint8mf8_t value) {
+  return __riscv_vset_v_i8mf8_i8mf8x4(dest, 0, value);
 }
-vint8mf8x5_t test_vset_v_i8mf8_i8mf8x5(vint8mf8x5_t dest, size_t index, vint8mf8_t val) {
-  return __riscv_vset_v_i8mf8_i8mf8x5(dest, 0, val);
+vint8mf8x5_t test_vset_v_i8mf8_i8mf8x5(vint8mf8x5_t dest, size_t index, vint8mf8_t value) {
+  return __riscv_vset_v_i8mf8_i8mf8x5(dest, 0, value);
 }
-vint8mf8x6_t test_vset_v_i8mf8_i8mf8x6(vint8mf8x6_t dest, size_t index, vint8mf8_t val) {
-  return __riscv_vset_v_i8mf8_i8mf8x6(dest, 0, val);
+vint8mf8x6_t test_vset_v_i8mf8_i8mf8x6(vint8mf8x6_t dest, size_t index, vint8mf8_t value) {
+  return __riscv_vset_v_i8mf8_i8mf8x6(dest, 0, value);
 }
-vint8mf8x7_t test_vset_v_i8mf8_i8mf8x7(vint8mf8x7_t dest, size_t index, vint8mf8_t val) {
-  return __riscv_vset_v_i8mf8_i8mf8x7(dest, 0, val);
+vint8mf8x7_t test_vset_v_i8mf8_i8mf8x7(vint8mf8x7_t dest, size_t index, vint8mf8_t value) {
+  return __riscv_vset_v_i8mf8_i8mf8x7(dest, 0, value);
 }
-vint8mf8x8_t test_vset_v_i8mf8_i8mf8x8(vint8mf8x8_t dest, size_t index, vint8mf8_t val) {
-  return __riscv_vset_v_i8mf8_i8mf8x8(dest, 0, val);
+vint8mf8x8_t test_vset_v_i8mf8_i8mf8x8(vint8mf8x8_t dest, size_t index, vint8mf8_t value) {
+  return __riscv_vset_v_i8mf8_i8mf8x8(dest, 0, value);
 }
-vint8mf4x2_t test_vset_v_i8mf4_i8mf4x2(vint8mf4x2_t dest, size_t index, vint8mf4_t val) {
-  return __riscv_vset_v_i8mf4_i8mf4x2(dest, 0, val);
+vint8mf4x2_t test_vset_v_i8mf4_i8mf4x2(vint8mf4x2_t dest, size_t index, vint8mf4_t value) {
+  return __riscv_vset_v_i8mf4_i8mf4x2(dest, 0, value);
 }
-vint8mf4x3_t test_vset_v_i8mf4_i8mf4x3(vint8mf4x3_t dest, size_t index, vint8mf4_t val) {
-  return __riscv_vset_v_i8mf4_i8mf4x3(dest, 0, val);
+vint8mf4x3_t test_vset_v_i8mf4_i8mf4x3(vint8mf4x3_t dest, size_t index, vint8mf4_t value) {
+  return __riscv_vset_v_i8mf4_i8mf4x3(dest, 0, value);
 }
-vint8mf4x4_t test_vset_v_i8mf4_i8mf4x4(vint8mf4x4_t dest, size_t index, vint8mf4_t val) {
-  return __riscv_vset_v_i8mf4_i8mf4x4(dest, 0, val);
+vint8mf4x4_t test_vset_v_i8mf4_i8mf4x4(vint8mf4x4_t dest, size_t index, vint8mf4_t value) {
+  return __riscv_vset_v_i8mf4_i8mf4x4(dest, 0, value);
 }
-vint8mf4x5_t test_vset_v_i8mf4_i8mf4x5(vint8mf4x5_t dest, size_t index, vint8mf4_t val) {
-  return __riscv_vset_v_i8mf4_i8mf4x5(dest, 0, val);
+vint8mf4x5_t test_vset_v_i8mf4_i8mf4x5(vint8mf4x5_t dest, size_t index, vint8mf4_t value) {
+  return __riscv_vset_v_i8mf4_i8mf4x5(dest, 0, value);
 }
-vint8mf4x6_t test_vset_v_i8mf4_i8mf4x6(vint8mf4x6_t dest, size_t index, vint8mf4_t val) {
-  return __riscv_vset_v_i8mf4_i8mf4x6(dest, 0, val);
+vint8mf4x6_t test_vset_v_i8mf4_i8mf4x6(vint8mf4x6_t dest, size_t index, vint8mf4_t value) {
+  return __riscv_vset_v_i8mf4_i8mf4x6(dest, 0, value);
 }
-vint8mf4x7_t test_vset_v_i8mf4_i8mf4x7(vint8mf4x7_t dest, size_t index, vint8mf4_t val) {
-  return __riscv_vset_v_i8mf4_i8mf4x7(dest, 0, val);
+vint8mf4x7_t test_vset_v_i8mf4_i8mf4x7(vint8mf4x7_t dest, size_t index, vint8mf4_t value) {
+  return __riscv_vset_v_i8mf4_i8mf4x7(dest, 0, value);
 }
-vint8mf4x8_t test_vset_v_i8mf4_i8mf4x8(vint8mf4x8_t dest, size_t index, vint8mf4_t val) {
-  return __riscv_vset_v_i8mf4_i8mf4x8(dest, 0, val);
+vint8mf4x8_t test_vset_v_i8mf4_i8mf4x8(vint8mf4x8_t dest, size_t index, vint8mf4_t value) {
+  return __riscv_vset_v_i8mf4_i8mf4x8(dest, 0, value);
 }
-vint8mf2x2_t test_vset_v_i8mf2_i8mf2x2(vint8mf2x2_t dest, size_t index, vint8mf2_t val) {
-  return __riscv_vset_v_i8mf2_i8mf2x2(dest, 0, val);
+vint8mf2x2_t test_vset_v_i8mf2_i8mf2x2(vint8mf2x2_t dest, size_t index, vint8mf2_t value) {
+  return __riscv_vset_v_i8mf2_i8mf2x2(dest, 0, value);
 }
-vint8mf2x3_t test_vset_v_i8mf2_i8mf2x3(vint8mf2x3_t dest, size_t index, vint8mf2_t val) {
-  return __riscv_vset_v_i8mf2_i8mf2x3(dest, 0, val);
+vint8mf2x3_t test_vset_v_i8mf2_i8mf2x3(vint8mf2x3_t dest, size_t index, vint8mf2_t value) {
+  return __riscv_vset_v_i8mf2_i8mf2x3(dest, 0, value);
 }
-vint8mf2x4_t test_vset_v_i8mf2_i8mf2x4(vint8mf2x4_t dest, size_t index, vint8mf2_t val) {
-  return __riscv_vset_v_i8mf2_i8mf2x4(dest, 0, val);
+vint8mf2x4_t test_vset_v_i8mf2_i8mf2x4(vint8mf2x4_t dest, size_t index, vint8mf2_t value) {
+  return __riscv_vset_v_i8mf2_i8mf2x4(dest, 0, value);
 }
-vint8mf2x5_t test_vset_v_i8mf2_i8mf2x5(vint8mf2x5_t dest, size_t index, vint8mf2_t val) {
-  return __riscv_vset_v_i8mf2_i8mf2x5(dest, 0, val);
+vint8mf2x5_t test_vset_v_i8mf2_i8mf2x5(vint8mf2x5_t dest, size_t index, vint8mf2_t value) {
+  return __riscv_vset_v_i8mf2_i8mf2x5(dest, 0, value);
 }
-vint8mf2x6_t test_vset_v_i8mf2_i8mf2x6(vint8mf2x6_t dest, size_t index, vint8mf2_t val) {
-  return __riscv_vset_v_i8mf2_i8mf2x6(dest, 0, val);
+vint8mf2x6_t test_vset_v_i8mf2_i8mf2x6(vint8mf2x6_t dest, size_t index, vint8mf2_t value) {
+  return __riscv_vset_v_i8mf2_i8mf2x6(dest, 0, value);
 }
-vint8mf2x7_t test_vset_v_i8mf2_i8mf2x7(vint8mf2x7_t dest, size_t index, vint8mf2_t val) {
-  return __riscv_vset_v_i8mf2_i8mf2x7(dest, 0, val);
+vint8mf2x7_t test_vset_v_i8mf2_i8mf2x7(vint8mf2x7_t dest, size_t index, vint8mf2_t value) {
+  return __riscv_vset_v_i8mf2_i8mf2x7(dest, 0, value);
 }
-vint8mf2x8_t test_vset_v_i8mf2_i8mf2x8(vint8mf2x8_t dest, size_t index, vint8mf2_t val) {
-  return __riscv_vset_v_i8mf2_i8mf2x8(dest, 0, val);
+vint8mf2x8_t test_vset_v_i8mf2_i8mf2x8(vint8mf2x8_t dest, size_t index, vint8mf2_t value) {
+  return __riscv_vset_v_i8mf2_i8mf2x8(dest, 0, value);
 }
-vint8m1x2_t test_vset_v_i8m1_i8m1x2(vint8m1x2_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m1x2(dest, 0, val);
+vint8m1x2_t test_vset_v_i8m1_i8m1x2(vint8m1x2_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m1x2(dest, 0, value);
 }
-vint8m1x3_t test_vset_v_i8m1_i8m1x3(vint8m1x3_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m1x3(dest, 0, val);
+vint8m1x3_t test_vset_v_i8m1_i8m1x3(vint8m1x3_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m1x3(dest, 0, value);
 }
-vint8m1x4_t test_vset_v_i8m1_i8m1x4(vint8m1x4_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m1x4(dest, 0, val);
+vint8m1x4_t test_vset_v_i8m1_i8m1x4(vint8m1x4_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m1x4(dest, 0, value);
 }
-vint8m1x5_t test_vset_v_i8m1_i8m1x5(vint8m1x5_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m1x5(dest, 0, val);
+vint8m1x5_t test_vset_v_i8m1_i8m1x5(vint8m1x5_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m1x5(dest, 0, value);
 }
-vint8m1x6_t test_vset_v_i8m1_i8m1x6(vint8m1x6_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m1x6(dest, 0, val);
+vint8m1x6_t test_vset_v_i8m1_i8m1x6(vint8m1x6_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m1x6(dest, 0, value);
 }
-vint8m1x7_t test_vset_v_i8m1_i8m1x7(vint8m1x7_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m1x7(dest, 0, val);
+vint8m1x7_t test_vset_v_i8m1_i8m1x7(vint8m1x7_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m1x7(dest, 0, value);
 }
-vint8m1x8_t test_vset_v_i8m1_i8m1x8(vint8m1x8_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset_v_i8m1_i8m1x8(dest, 0, val);
+vint8m1x8_t test_vset_v_i8m1_i8m1x8(vint8m1x8_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset_v_i8m1_i8m1x8(dest, 0, value);
 }
-vint8m2x2_t test_vset_v_i8m2_i8m2x2(vint8m2x2_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset_v_i8m2_i8m2x2(dest, 0, val);
+vint8m2x2_t test_vset_v_i8m2_i8m2x2(vint8m2x2_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset_v_i8m2_i8m2x2(dest, 0, value);
 }
-vint8m2x3_t test_vset_v_i8m2_i8m2x3(vint8m2x3_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset_v_i8m2_i8m2x3(dest, 0, val);
+vint8m2x3_t test_vset_v_i8m2_i8m2x3(vint8m2x3_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset_v_i8m2_i8m2x3(dest, 0, value);
 }
-vint8m2x4_t test_vset_v_i8m2_i8m2x4(vint8m2x4_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset_v_i8m2_i8m2x4(dest, 0, val);
+vint8m2x4_t test_vset_v_i8m2_i8m2x4(vint8m2x4_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset_v_i8m2_i8m2x4(dest, 0, value);
 }
-vint8m4x2_t test_vset_v_i8m4_i8m4x2(vint8m4x2_t dest, size_t index, vint8m4_t val) {
-  return __riscv_vset_v_i8m4_i8m4x2(dest, 0, val);
+vint8m4x2_t test_vset_v_i8m4_i8m4x2(vint8m4x2_t dest, size_t index, vint8m4_t value) {
+  return __riscv_vset_v_i8m4_i8m4x2(dest, 0, value);
 }
-vint16mf4x2_t test_vset_v_i16mf4_i16mf4x2(vint16mf4x2_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset_v_i16mf4_i16mf4x2(dest, 0, val);
+vint16mf4x2_t test_vset_v_i16mf4_i16mf4x2(vint16mf4x2_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset_v_i16mf4_i16mf4x2(dest, 0, value);
 }
-vint16mf4x3_t test_vset_v_i16mf4_i16mf4x3(vint16mf4x3_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset_v_i16mf4_i16mf4x3(dest, 0, val);
+vint16mf4x3_t test_vset_v_i16mf4_i16mf4x3(vint16mf4x3_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset_v_i16mf4_i16mf4x3(dest, 0, value);
 }
-vint16mf4x4_t test_vset_v_i16mf4_i16mf4x4(vint16mf4x4_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset_v_i16mf4_i16mf4x4(dest, 0, val);
+vint16mf4x4_t test_vset_v_i16mf4_i16mf4x4(vint16mf4x4_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset_v_i16mf4_i16mf4x4(dest, 0, value);
 }
-vint16mf4x5_t test_vset_v_i16mf4_i16mf4x5(vint16mf4x5_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset_v_i16mf4_i16mf4x5(dest, 0, val);
+vint16mf4x5_t test_vset_v_i16mf4_i16mf4x5(vint16mf4x5_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset_v_i16mf4_i16mf4x5(dest, 0, value);
 }
-vint16mf4x6_t test_vset_v_i16mf4_i16mf4x6(vint16mf4x6_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset_v_i16mf4_i16mf4x6(dest, 0, val);
+vint16mf4x6_t test_vset_v_i16mf4_i16mf4x6(vint16mf4x6_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset_v_i16mf4_i16mf4x6(dest, 0, value);
 }
-vint16mf4x7_t test_vset_v_i16mf4_i16mf4x7(vint16mf4x7_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset_v_i16mf4_i16mf4x7(dest, 0, val);
+vint16mf4x7_t test_vset_v_i16mf4_i16mf4x7(vint16mf4x7_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset_v_i16mf4_i16mf4x7(dest, 0, value);
 }
-vint16mf4x8_t test_vset_v_i16mf4_i16mf4x8(vint16mf4x8_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset_v_i16mf4_i16mf4x8(dest, 0, val);
+vint16mf4x8_t test_vset_v_i16mf4_i16mf4x8(vint16mf4x8_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset_v_i16mf4_i16mf4x8(dest, 0, value);
 }
-vint16mf2x2_t test_vset_v_i16mf2_i16mf2x2(vint16mf2x2_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset_v_i16mf2_i16mf2x2(dest, 0, val);
+vint16mf2x2_t test_vset_v_i16mf2_i16mf2x2(vint16mf2x2_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset_v_i16mf2_i16mf2x2(dest, 0, value);
 }
-vint16mf2x3_t test_vset_v_i16mf2_i16mf2x3(vint16mf2x3_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset_v_i16mf2_i16mf2x3(dest, 0, val);
+vint16mf2x3_t test_vset_v_i16mf2_i16mf2x3(vint16mf2x3_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset_v_i16mf2_i16mf2x3(dest, 0, value);
 }
-vint16mf2x4_t test_vset_v_i16mf2_i16mf2x4(vint16mf2x4_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset_v_i16mf2_i16mf2x4(dest, 0, val);
+vint16mf2x4_t test_vset_v_i16mf2_i16mf2x4(vint16mf2x4_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset_v_i16mf2_i16mf2x4(dest, 0, value);
 }
-vint16mf2x5_t test_vset_v_i16mf2_i16mf2x5(vint16mf2x5_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset_v_i16mf2_i16mf2x5(dest, 0, val);
+vint16mf2x5_t test_vset_v_i16mf2_i16mf2x5(vint16mf2x5_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset_v_i16mf2_i16mf2x5(dest, 0, value);
 }
-vint16mf2x6_t test_vset_v_i16mf2_i16mf2x6(vint16mf2x6_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset_v_i16mf2_i16mf2x6(dest, 0, val);
+vint16mf2x6_t test_vset_v_i16mf2_i16mf2x6(vint16mf2x6_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset_v_i16mf2_i16mf2x6(dest, 0, value);
 }
-vint16mf2x7_t test_vset_v_i16mf2_i16mf2x7(vint16mf2x7_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset_v_i16mf2_i16mf2x7(dest, 0, val);
+vint16mf2x7_t test_vset_v_i16mf2_i16mf2x7(vint16mf2x7_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset_v_i16mf2_i16mf2x7(dest, 0, value);
 }
-vint16mf2x8_t test_vset_v_i16mf2_i16mf2x8(vint16mf2x8_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset_v_i16mf2_i16mf2x8(dest, 0, val);
+vint16mf2x8_t test_vset_v_i16mf2_i16mf2x8(vint16mf2x8_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset_v_i16mf2_i16mf2x8(dest, 0, value);
 }
-vint16m1x2_t test_vset_v_i16m1_i16m1x2(vint16m1x2_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m1x2(dest, 0, val);
+vint16m1x2_t test_vset_v_i16m1_i16m1x2(vint16m1x2_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m1x2(dest, 0, value);
 }
-vint16m1x3_t test_vset_v_i16m1_i16m1x3(vint16m1x3_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m1x3(dest, 0, val);
+vint16m1x3_t test_vset_v_i16m1_i16m1x3(vint16m1x3_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m1x3(dest, 0, value);
 }
-vint16m1x4_t test_vset_v_i16m1_i16m1x4(vint16m1x4_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m1x4(dest, 0, val);
+vint16m1x4_t test_vset_v_i16m1_i16m1x4(vint16m1x4_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m1x4(dest, 0, value);
 }
-vint16m1x5_t test_vset_v_i16m1_i16m1x5(vint16m1x5_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m1x5(dest, 0, val);
+vint16m1x5_t test_vset_v_i16m1_i16m1x5(vint16m1x5_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m1x5(dest, 0, value);
 }
-vint16m1x6_t test_vset_v_i16m1_i16m1x6(vint16m1x6_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m1x6(dest, 0, val);
+vint16m1x6_t test_vset_v_i16m1_i16m1x6(vint16m1x6_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m1x6(dest, 0, value);
 }
-vint16m1x7_t test_vset_v_i16m1_i16m1x7(vint16m1x7_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m1x7(dest, 0, val);
+vint16m1x7_t test_vset_v_i16m1_i16m1x7(vint16m1x7_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m1x7(dest, 0, value);
 }
-vint16m1x8_t test_vset_v_i16m1_i16m1x8(vint16m1x8_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset_v_i16m1_i16m1x8(dest, 0, val);
+vint16m1x8_t test_vset_v_i16m1_i16m1x8(vint16m1x8_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset_v_i16m1_i16m1x8(dest, 0, value);
 }
-vint16m2x2_t test_vset_v_i16m2_i16m2x2(vint16m2x2_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset_v_i16m2_i16m2x2(dest, 0, val);
+vint16m2x2_t test_vset_v_i16m2_i16m2x2(vint16m2x2_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset_v_i16m2_i16m2x2(dest, 0, value);
 }
-vint16m2x3_t test_vset_v_i16m2_i16m2x3(vint16m2x3_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset_v_i16m2_i16m2x3(dest, 0, val);
+vint16m2x3_t test_vset_v_i16m2_i16m2x3(vint16m2x3_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset_v_i16m2_i16m2x3(dest, 0, value);
 }
-vint16m2x4_t test_vset_v_i16m2_i16m2x4(vint16m2x4_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset_v_i16m2_i16m2x4(dest, 0, val);
+vint16m2x4_t test_vset_v_i16m2_i16m2x4(vint16m2x4_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset_v_i16m2_i16m2x4(dest, 0, value);
 }
-vint16m4x2_t test_vset_v_i16m4_i16m4x2(vint16m4x2_t dest, size_t index, vint16m4_t val) {
-  return __riscv_vset_v_i16m4_i16m4x2(dest, 0, val);
+vint16m4x2_t test_vset_v_i16m4_i16m4x2(vint16m4x2_t dest, size_t index, vint16m4_t value) {
+  return __riscv_vset_v_i16m4_i16m4x2(dest, 0, value);
 }
-vint32mf2x2_t test_vset_v_i32mf2_i32mf2x2(vint32mf2x2_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset_v_i32mf2_i32mf2x2(dest, 0, val);
+vint32mf2x2_t test_vset_v_i32mf2_i32mf2x2(vint32mf2x2_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset_v_i32mf2_i32mf2x2(dest, 0, value);
 }
-vint32mf2x3_t test_vset_v_i32mf2_i32mf2x3(vint32mf2x3_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset_v_i32mf2_i32mf2x3(dest, 0, val);
+vint32mf2x3_t test_vset_v_i32mf2_i32mf2x3(vint32mf2x3_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset_v_i32mf2_i32mf2x3(dest, 0, value);
 }
-vint32mf2x4_t test_vset_v_i32mf2_i32mf2x4(vint32mf2x4_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset_v_i32mf2_i32mf2x4(dest, 0, val);
+vint32mf2x4_t test_vset_v_i32mf2_i32mf2x4(vint32mf2x4_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset_v_i32mf2_i32mf2x4(dest, 0, value);
 }
-vint32mf2x5_t test_vset_v_i32mf2_i32mf2x5(vint32mf2x5_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset_v_i32mf2_i32mf2x5(dest, 0, val);
+vint32mf2x5_t test_vset_v_i32mf2_i32mf2x5(vint32mf2x5_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset_v_i32mf2_i32mf2x5(dest, 0, value);
 }
-vint32mf2x6_t test_vset_v_i32mf2_i32mf2x6(vint32mf2x6_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset_v_i32mf2_i32mf2x6(dest, 0, val);
+vint32mf2x6_t test_vset_v_i32mf2_i32mf2x6(vint32mf2x6_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset_v_i32mf2_i32mf2x6(dest, 0, value);
 }
-vint32mf2x7_t test_vset_v_i32mf2_i32mf2x7(vint32mf2x7_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset_v_i32mf2_i32mf2x7(dest, 0, val);
+vint32mf2x7_t test_vset_v_i32mf2_i32mf2x7(vint32mf2x7_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset_v_i32mf2_i32mf2x7(dest, 0, value);
 }
-vint32mf2x8_t test_vset_v_i32mf2_i32mf2x8(vint32mf2x8_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset_v_i32mf2_i32mf2x8(dest, 0, val);
+vint32mf2x8_t test_vset_v_i32mf2_i32mf2x8(vint32mf2x8_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset_v_i32mf2_i32mf2x8(dest, 0, value);
 }
-vint32m1x2_t test_vset_v_i32m1_i32m1x2(vint32m1x2_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m1x2(dest, 0, val);
+vint32m1x2_t test_vset_v_i32m1_i32m1x2(vint32m1x2_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m1x2(dest, 0, value);
 }
-vint32m1x3_t test_vset_v_i32m1_i32m1x3(vint32m1x3_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m1x3(dest, 0, val);
+vint32m1x3_t test_vset_v_i32m1_i32m1x3(vint32m1x3_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m1x3(dest, 0, value);
 }
-vint32m1x4_t test_vset_v_i32m1_i32m1x4(vint32m1x4_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m1x4(dest, 0, val);
+vint32m1x4_t test_vset_v_i32m1_i32m1x4(vint32m1x4_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m1x4(dest, 0, value);
 }
-vint32m1x5_t test_vset_v_i32m1_i32m1x5(vint32m1x5_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m1x5(dest, 0, val);
+vint32m1x5_t test_vset_v_i32m1_i32m1x5(vint32m1x5_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m1x5(dest, 0, value);
 }
-vint32m1x6_t test_vset_v_i32m1_i32m1x6(vint32m1x6_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m1x6(dest, 0, val);
+vint32m1x6_t test_vset_v_i32m1_i32m1x6(vint32m1x6_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m1x6(dest, 0, value);
 }
-vint32m1x7_t test_vset_v_i32m1_i32m1x7(vint32m1x7_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m1x7(dest, 0, val);
+vint32m1x7_t test_vset_v_i32m1_i32m1x7(vint32m1x7_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m1x7(dest, 0, value);
 }
-vint32m1x8_t test_vset_v_i32m1_i32m1x8(vint32m1x8_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset_v_i32m1_i32m1x8(dest, 0, val);
+vint32m1x8_t test_vset_v_i32m1_i32m1x8(vint32m1x8_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset_v_i32m1_i32m1x8(dest, 0, value);
 }
-vint32m2x2_t test_vset_v_i32m2_i32m2x2(vint32m2x2_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset_v_i32m2_i32m2x2(dest, 0, val);
+vint32m2x2_t test_vset_v_i32m2_i32m2x2(vint32m2x2_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset_v_i32m2_i32m2x2(dest, 0, value);
 }
-vint32m2x3_t test_vset_v_i32m2_i32m2x3(vint32m2x3_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset_v_i32m2_i32m2x3(dest, 0, val);
+vint32m2x3_t test_vset_v_i32m2_i32m2x3(vint32m2x3_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset_v_i32m2_i32m2x3(dest, 0, value);
 }
-vint32m2x4_t test_vset_v_i32m2_i32m2x4(vint32m2x4_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset_v_i32m2_i32m2x4(dest, 0, val);
+vint32m2x4_t test_vset_v_i32m2_i32m2x4(vint32m2x4_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset_v_i32m2_i32m2x4(dest, 0, value);
 }
-vint32m4x2_t test_vset_v_i32m4_i32m4x2(vint32m4x2_t dest, size_t index, vint32m4_t val) {
-  return __riscv_vset_v_i32m4_i32m4x2(dest, 0, val);
+vint32m4x2_t test_vset_v_i32m4_i32m4x2(vint32m4x2_t dest, size_t index, vint32m4_t value) {
+  return __riscv_vset_v_i32m4_i32m4x2(dest, 0, value);
 }
-vint64m1x2_t test_vset_v_i64m1_i64m1x2(vint64m1x2_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m1x2(dest, 0, val);
+vint64m1x2_t test_vset_v_i64m1_i64m1x2(vint64m1x2_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m1x2(dest, 0, value);
 }
-vint64m1x3_t test_vset_v_i64m1_i64m1x3(vint64m1x3_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m1x3(dest, 0, val);
+vint64m1x3_t test_vset_v_i64m1_i64m1x3(vint64m1x3_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m1x3(dest, 0, value);
 }
-vint64m1x4_t test_vset_v_i64m1_i64m1x4(vint64m1x4_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m1x4(dest, 0, val);
+vint64m1x4_t test_vset_v_i64m1_i64m1x4(vint64m1x4_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m1x4(dest, 0, value);
 }
-vint64m1x5_t test_vset_v_i64m1_i64m1x5(vint64m1x5_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m1x5(dest, 0, val);
+vint64m1x5_t test_vset_v_i64m1_i64m1x5(vint64m1x5_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m1x5(dest, 0, value);
 }
-vint64m1x6_t test_vset_v_i64m1_i64m1x6(vint64m1x6_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m1x6(dest, 0, val);
+vint64m1x6_t test_vset_v_i64m1_i64m1x6(vint64m1x6_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m1x6(dest, 0, value);
 }
-vint64m1x7_t test_vset_v_i64m1_i64m1x7(vint64m1x7_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m1x7(dest, 0, val);
+vint64m1x7_t test_vset_v_i64m1_i64m1x7(vint64m1x7_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m1x7(dest, 0, value);
 }
-vint64m1x8_t test_vset_v_i64m1_i64m1x8(vint64m1x8_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset_v_i64m1_i64m1x8(dest, 0, val);
+vint64m1x8_t test_vset_v_i64m1_i64m1x8(vint64m1x8_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset_v_i64m1_i64m1x8(dest, 0, value);
 }
-vint64m2x2_t test_vset_v_i64m2_i64m2x2(vint64m2x2_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset_v_i64m2_i64m2x2(dest, 0, val);
+vint64m2x2_t test_vset_v_i64m2_i64m2x2(vint64m2x2_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset_v_i64m2_i64m2x2(dest, 0, value);
 }
-vint64m2x3_t test_vset_v_i64m2_i64m2x3(vint64m2x3_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset_v_i64m2_i64m2x3(dest, 0, val);
+vint64m2x3_t test_vset_v_i64m2_i64m2x3(vint64m2x3_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset_v_i64m2_i64m2x3(dest, 0, value);
 }
-vint64m2x4_t test_vset_v_i64m2_i64m2x4(vint64m2x4_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset_v_i64m2_i64m2x4(dest, 0, val);
+vint64m2x4_t test_vset_v_i64m2_i64m2x4(vint64m2x4_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset_v_i64m2_i64m2x4(dest, 0, value);
 }
-vint64m4x2_t test_vset_v_i64m4_i64m4x2(vint64m4x2_t dest, size_t index, vint64m4_t val) {
-  return __riscv_vset_v_i64m4_i64m4x2(dest, 0, val);
+vint64m4x2_t test_vset_v_i64m4_i64m4x2(vint64m4x2_t dest, size_t index, vint64m4_t value) {
+  return __riscv_vset_v_i64m4_i64m4x2(dest, 0, value);
 }
-vuint8mf8x2_t test_vset_v_u8mf8_u8mf8x2(vuint8mf8x2_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset_v_u8mf8_u8mf8x2(dest, 0, val);
+vuint8mf8x2_t test_vset_v_u8mf8_u8mf8x2(vuint8mf8x2_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset_v_u8mf8_u8mf8x2(dest, 0, value);
 }
-vuint8mf8x3_t test_vset_v_u8mf8_u8mf8x3(vuint8mf8x3_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset_v_u8mf8_u8mf8x3(dest, 0, val);
+vuint8mf8x3_t test_vset_v_u8mf8_u8mf8x3(vuint8mf8x3_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset_v_u8mf8_u8mf8x3(dest, 0, value);
 }
-vuint8mf8x4_t test_vset_v_u8mf8_u8mf8x4(vuint8mf8x4_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset_v_u8mf8_u8mf8x4(dest, 0, val);
+vuint8mf8x4_t test_vset_v_u8mf8_u8mf8x4(vuint8mf8x4_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset_v_u8mf8_u8mf8x4(dest, 0, value);
 }
-vuint8mf8x5_t test_vset_v_u8mf8_u8mf8x5(vuint8mf8x5_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset_v_u8mf8_u8mf8x5(dest, 0, val);
+vuint8mf8x5_t test_vset_v_u8mf8_u8mf8x5(vuint8mf8x5_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset_v_u8mf8_u8mf8x5(dest, 0, value);
 }
-vuint8mf8x6_t test_vset_v_u8mf8_u8mf8x6(vuint8mf8x6_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset_v_u8mf8_u8mf8x6(dest, 0, val);
+vuint8mf8x6_t test_vset_v_u8mf8_u8mf8x6(vuint8mf8x6_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset_v_u8mf8_u8mf8x6(dest, 0, value);
 }
-vuint8mf8x7_t test_vset_v_u8mf8_u8mf8x7(vuint8mf8x7_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset_v_u8mf8_u8mf8x7(dest, 0, val);
+vuint8mf8x7_t test_vset_v_u8mf8_u8mf8x7(vuint8mf8x7_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset_v_u8mf8_u8mf8x7(dest, 0, value);
 }
-vuint8mf8x8_t test_vset_v_u8mf8_u8mf8x8(vuint8mf8x8_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset_v_u8mf8_u8mf8x8(dest, 0, val);
+vuint8mf8x8_t test_vset_v_u8mf8_u8mf8x8(vuint8mf8x8_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset_v_u8mf8_u8mf8x8(dest, 0, value);
 }
-vuint8mf4x2_t test_vset_v_u8mf4_u8mf4x2(vuint8mf4x2_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset_v_u8mf4_u8mf4x2(dest, 0, val);
+vuint8mf4x2_t test_vset_v_u8mf4_u8mf4x2(vuint8mf4x2_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset_v_u8mf4_u8mf4x2(dest, 0, value);
 }
-vuint8mf4x3_t test_vset_v_u8mf4_u8mf4x3(vuint8mf4x3_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset_v_u8mf4_u8mf4x3(dest, 0, val);
+vuint8mf4x3_t test_vset_v_u8mf4_u8mf4x3(vuint8mf4x3_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset_v_u8mf4_u8mf4x3(dest, 0, value);
 }
-vuint8mf4x4_t test_vset_v_u8mf4_u8mf4x4(vuint8mf4x4_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset_v_u8mf4_u8mf4x4(dest, 0, val);
+vuint8mf4x4_t test_vset_v_u8mf4_u8mf4x4(vuint8mf4x4_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset_v_u8mf4_u8mf4x4(dest, 0, value);
 }
-vuint8mf4x5_t test_vset_v_u8mf4_u8mf4x5(vuint8mf4x5_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset_v_u8mf4_u8mf4x5(dest, 0, val);
+vuint8mf4x5_t test_vset_v_u8mf4_u8mf4x5(vuint8mf4x5_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset_v_u8mf4_u8mf4x5(dest, 0, value);
 }
-vuint8mf4x6_t test_vset_v_u8mf4_u8mf4x6(vuint8mf4x6_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset_v_u8mf4_u8mf4x6(dest, 0, val);
+vuint8mf4x6_t test_vset_v_u8mf4_u8mf4x6(vuint8mf4x6_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset_v_u8mf4_u8mf4x6(dest, 0, value);
 }
-vuint8mf4x7_t test_vset_v_u8mf4_u8mf4x7(vuint8mf4x7_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset_v_u8mf4_u8mf4x7(dest, 0, val);
+vuint8mf4x7_t test_vset_v_u8mf4_u8mf4x7(vuint8mf4x7_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset_v_u8mf4_u8mf4x7(dest, 0, value);
 }
-vuint8mf4x8_t test_vset_v_u8mf4_u8mf4x8(vuint8mf4x8_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset_v_u8mf4_u8mf4x8(dest, 0, val);
+vuint8mf4x8_t test_vset_v_u8mf4_u8mf4x8(vuint8mf4x8_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset_v_u8mf4_u8mf4x8(dest, 0, value);
 }
-vuint8mf2x2_t test_vset_v_u8mf2_u8mf2x2(vuint8mf2x2_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset_v_u8mf2_u8mf2x2(dest, 0, val);
+vuint8mf2x2_t test_vset_v_u8mf2_u8mf2x2(vuint8mf2x2_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset_v_u8mf2_u8mf2x2(dest, 0, value);
 }
-vuint8mf2x3_t test_vset_v_u8mf2_u8mf2x3(vuint8mf2x3_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset_v_u8mf2_u8mf2x3(dest, 0, val);
+vuint8mf2x3_t test_vset_v_u8mf2_u8mf2x3(vuint8mf2x3_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset_v_u8mf2_u8mf2x3(dest, 0, value);
 }
-vuint8mf2x4_t test_vset_v_u8mf2_u8mf2x4(vuint8mf2x4_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset_v_u8mf2_u8mf2x4(dest, 0, val);
+vuint8mf2x4_t test_vset_v_u8mf2_u8mf2x4(vuint8mf2x4_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset_v_u8mf2_u8mf2x4(dest, 0, value);
 }
-vuint8mf2x5_t test_vset_v_u8mf2_u8mf2x5(vuint8mf2x5_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset_v_u8mf2_u8mf2x5(dest, 0, val);
+vuint8mf2x5_t test_vset_v_u8mf2_u8mf2x5(vuint8mf2x5_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset_v_u8mf2_u8mf2x5(dest, 0, value);
 }
-vuint8mf2x6_t test_vset_v_u8mf2_u8mf2x6(vuint8mf2x6_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset_v_u8mf2_u8mf2x6(dest, 0, val);
+vuint8mf2x6_t test_vset_v_u8mf2_u8mf2x6(vuint8mf2x6_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset_v_u8mf2_u8mf2x6(dest, 0, value);
 }
-vuint8mf2x7_t test_vset_v_u8mf2_u8mf2x7(vuint8mf2x7_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset_v_u8mf2_u8mf2x7(dest, 0, val);
+vuint8mf2x7_t test_vset_v_u8mf2_u8mf2x7(vuint8mf2x7_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset_v_u8mf2_u8mf2x7(dest, 0, value);
 }
-vuint8mf2x8_t test_vset_v_u8mf2_u8mf2x8(vuint8mf2x8_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset_v_u8mf2_u8mf2x8(dest, 0, val);
+vuint8mf2x8_t test_vset_v_u8mf2_u8mf2x8(vuint8mf2x8_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset_v_u8mf2_u8mf2x8(dest, 0, value);
 }
-vuint8m1x2_t test_vset_v_u8m1_u8m1x2(vuint8m1x2_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m1x2(dest, 0, val);
+vuint8m1x2_t test_vset_v_u8m1_u8m1x2(vuint8m1x2_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m1x2(dest, 0, value);
 }
-vuint8m1x3_t test_vset_v_u8m1_u8m1x3(vuint8m1x3_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m1x3(dest, 0, val);
+vuint8m1x3_t test_vset_v_u8m1_u8m1x3(vuint8m1x3_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m1x3(dest, 0, value);
 }
-vuint8m1x4_t test_vset_v_u8m1_u8m1x4(vuint8m1x4_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m1x4(dest, 0, val);
+vuint8m1x4_t test_vset_v_u8m1_u8m1x4(vuint8m1x4_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m1x4(dest, 0, value);
 }
-vuint8m1x5_t test_vset_v_u8m1_u8m1x5(vuint8m1x5_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m1x5(dest, 0, val);
+vuint8m1x5_t test_vset_v_u8m1_u8m1x5(vuint8m1x5_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m1x5(dest, 0, value);
 }
-vuint8m1x6_t test_vset_v_u8m1_u8m1x6(vuint8m1x6_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m1x6(dest, 0, val);
+vuint8m1x6_t test_vset_v_u8m1_u8m1x6(vuint8m1x6_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m1x6(dest, 0, value);
 }
-vuint8m1x7_t test_vset_v_u8m1_u8m1x7(vuint8m1x7_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m1x7(dest, 0, val);
+vuint8m1x7_t test_vset_v_u8m1_u8m1x7(vuint8m1x7_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m1x7(dest, 0, value);
 }
-vuint8m1x8_t test_vset_v_u8m1_u8m1x8(vuint8m1x8_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset_v_u8m1_u8m1x8(dest, 0, val);
+vuint8m1x8_t test_vset_v_u8m1_u8m1x8(vuint8m1x8_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset_v_u8m1_u8m1x8(dest, 0, value);
 }
-vuint8m2x2_t test_vset_v_u8m2_u8m2x2(vuint8m2x2_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset_v_u8m2_u8m2x2(dest, 0, val);
+vuint8m2x2_t test_vset_v_u8m2_u8m2x2(vuint8m2x2_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset_v_u8m2_u8m2x2(dest, 0, value);
 }
-vuint8m2x3_t test_vset_v_u8m2_u8m2x3(vuint8m2x3_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset_v_u8m2_u8m2x3(dest, 0, val);
+vuint8m2x3_t test_vset_v_u8m2_u8m2x3(vuint8m2x3_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset_v_u8m2_u8m2x3(dest, 0, value);
 }
-vuint8m2x4_t test_vset_v_u8m2_u8m2x4(vuint8m2x4_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset_v_u8m2_u8m2x4(dest, 0, val);
+vuint8m2x4_t test_vset_v_u8m2_u8m2x4(vuint8m2x4_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset_v_u8m2_u8m2x4(dest, 0, value);
 }
-vuint8m4x2_t test_vset_v_u8m4_u8m4x2(vuint8m4x2_t dest, size_t index, vuint8m4_t val) {
-  return __riscv_vset_v_u8m4_u8m4x2(dest, 0, val);
+vuint8m4x2_t test_vset_v_u8m4_u8m4x2(vuint8m4x2_t dest, size_t index, vuint8m4_t value) {
+  return __riscv_vset_v_u8m4_u8m4x2(dest, 0, value);
 }
-vuint16mf4x2_t test_vset_v_u16mf4_u16mf4x2(vuint16mf4x2_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset_v_u16mf4_u16mf4x2(dest, 0, val);
+vuint16mf4x2_t test_vset_v_u16mf4_u16mf4x2(vuint16mf4x2_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset_v_u16mf4_u16mf4x2(dest, 0, value);
 }
-vuint16mf4x3_t test_vset_v_u16mf4_u16mf4x3(vuint16mf4x3_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset_v_u16mf4_u16mf4x3(dest, 0, val);
+vuint16mf4x3_t test_vset_v_u16mf4_u16mf4x3(vuint16mf4x3_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset_v_u16mf4_u16mf4x3(dest, 0, value);
 }
-vuint16mf4x4_t test_vset_v_u16mf4_u16mf4x4(vuint16mf4x4_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset_v_u16mf4_u16mf4x4(dest, 0, val);
+vuint16mf4x4_t test_vset_v_u16mf4_u16mf4x4(vuint16mf4x4_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset_v_u16mf4_u16mf4x4(dest, 0, value);
 }
-vuint16mf4x5_t test_vset_v_u16mf4_u16mf4x5(vuint16mf4x5_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset_v_u16mf4_u16mf4x5(dest, 0, val);
+vuint16mf4x5_t test_vset_v_u16mf4_u16mf4x5(vuint16mf4x5_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset_v_u16mf4_u16mf4x5(dest, 0, value);
 }
-vuint16mf4x6_t test_vset_v_u16mf4_u16mf4x6(vuint16mf4x6_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset_v_u16mf4_u16mf4x6(dest, 0, val);
+vuint16mf4x6_t test_vset_v_u16mf4_u16mf4x6(vuint16mf4x6_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset_v_u16mf4_u16mf4x6(dest, 0, value);
 }
-vuint16mf4x7_t test_vset_v_u16mf4_u16mf4x7(vuint16mf4x7_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset_v_u16mf4_u16mf4x7(dest, 0, val);
+vuint16mf4x7_t test_vset_v_u16mf4_u16mf4x7(vuint16mf4x7_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset_v_u16mf4_u16mf4x7(dest, 0, value);
 }
-vuint16mf4x8_t test_vset_v_u16mf4_u16mf4x8(vuint16mf4x8_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset_v_u16mf4_u16mf4x8(dest, 0, val);
+vuint16mf4x8_t test_vset_v_u16mf4_u16mf4x8(vuint16mf4x8_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset_v_u16mf4_u16mf4x8(dest, 0, value);
 }
-vuint16mf2x2_t test_vset_v_u16mf2_u16mf2x2(vuint16mf2x2_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset_v_u16mf2_u16mf2x2(dest, 0, val);
+vuint16mf2x2_t test_vset_v_u16mf2_u16mf2x2(vuint16mf2x2_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset_v_u16mf2_u16mf2x2(dest, 0, value);
 }
-vuint16mf2x3_t test_vset_v_u16mf2_u16mf2x3(vuint16mf2x3_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset_v_u16mf2_u16mf2x3(dest, 0, val);
+vuint16mf2x3_t test_vset_v_u16mf2_u16mf2x3(vuint16mf2x3_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset_v_u16mf2_u16mf2x3(dest, 0, value);
 }
-vuint16mf2x4_t test_vset_v_u16mf2_u16mf2x4(vuint16mf2x4_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset_v_u16mf2_u16mf2x4(dest, 0, val);
+vuint16mf2x4_t test_vset_v_u16mf2_u16mf2x4(vuint16mf2x4_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset_v_u16mf2_u16mf2x4(dest, 0, value);
 }
-vuint16mf2x5_t test_vset_v_u16mf2_u16mf2x5(vuint16mf2x5_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset_v_u16mf2_u16mf2x5(dest, 0, val);
+vuint16mf2x5_t test_vset_v_u16mf2_u16mf2x5(vuint16mf2x5_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset_v_u16mf2_u16mf2x5(dest, 0, value);
 }
-vuint16mf2x6_t test_vset_v_u16mf2_u16mf2x6(vuint16mf2x6_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset_v_u16mf2_u16mf2x6(dest, 0, val);
+vuint16mf2x6_t test_vset_v_u16mf2_u16mf2x6(vuint16mf2x6_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset_v_u16mf2_u16mf2x6(dest, 0, value);
 }
-vuint16mf2x7_t test_vset_v_u16mf2_u16mf2x7(vuint16mf2x7_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset_v_u16mf2_u16mf2x7(dest, 0, val);
+vuint16mf2x7_t test_vset_v_u16mf2_u16mf2x7(vuint16mf2x7_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset_v_u16mf2_u16mf2x7(dest, 0, value);
 }
-vuint16mf2x8_t test_vset_v_u16mf2_u16mf2x8(vuint16mf2x8_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset_v_u16mf2_u16mf2x8(dest, 0, val);
+vuint16mf2x8_t test_vset_v_u16mf2_u16mf2x8(vuint16mf2x8_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset_v_u16mf2_u16mf2x8(dest, 0, value);
 }
-vuint16m1x2_t test_vset_v_u16m1_u16m1x2(vuint16m1x2_t dest, size_t index, vuint16m1_t val) {
-  return
__riscv_vset_v_u16m1_u16m1x2(dest, 0, val); +vuint16m1x2_t test_vset_v_u16m1_u16m1x2(vuint16m1x2_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset_v_u16m1_u16m1x2(dest, 0, value); } -vuint16m1x3_t test_vset_v_u16m1_u16m1x3(vuint16m1x3_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset_v_u16m1_u16m1x3(dest, 0, val); +vuint16m1x3_t test_vset_v_u16m1_u16m1x3(vuint16m1x3_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset_v_u16m1_u16m1x3(dest, 0, value); } -vuint16m1x4_t test_vset_v_u16m1_u16m1x4(vuint16m1x4_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset_v_u16m1_u16m1x4(dest, 0, val); +vuint16m1x4_t test_vset_v_u16m1_u16m1x4(vuint16m1x4_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset_v_u16m1_u16m1x4(dest, 0, value); } -vuint16m1x5_t test_vset_v_u16m1_u16m1x5(vuint16m1x5_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset_v_u16m1_u16m1x5(dest, 0, val); +vuint16m1x5_t test_vset_v_u16m1_u16m1x5(vuint16m1x5_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset_v_u16m1_u16m1x5(dest, 0, value); } -vuint16m1x6_t test_vset_v_u16m1_u16m1x6(vuint16m1x6_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset_v_u16m1_u16m1x6(dest, 0, val); +vuint16m1x6_t test_vset_v_u16m1_u16m1x6(vuint16m1x6_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset_v_u16m1_u16m1x6(dest, 0, value); } -vuint16m1x7_t test_vset_v_u16m1_u16m1x7(vuint16m1x7_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset_v_u16m1_u16m1x7(dest, 0, val); +vuint16m1x7_t test_vset_v_u16m1_u16m1x7(vuint16m1x7_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset_v_u16m1_u16m1x7(dest, 0, value); } -vuint16m1x8_t test_vset_v_u16m1_u16m1x8(vuint16m1x8_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset_v_u16m1_u16m1x8(dest, 0, val); +vuint16m1x8_t test_vset_v_u16m1_u16m1x8(vuint16m1x8_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset_v_u16m1_u16m1x8(dest, 0, value); } -vuint16m2x2_t test_vset_v_u16m2_u16m2x2(vuint16m2x2_t dest, size_t index, vuint16m2_t val) { - return __riscv_vset_v_u16m2_u16m2x2(dest, 0, val); +vuint16m2x2_t test_vset_v_u16m2_u16m2x2(vuint16m2x2_t dest, size_t index, vuint16m2_t value) { + return __riscv_vset_v_u16m2_u16m2x2(dest, 0, value); } -vuint16m2x3_t test_vset_v_u16m2_u16m2x3(vuint16m2x3_t dest, size_t index, vuint16m2_t val) { - return __riscv_vset_v_u16m2_u16m2x3(dest, 0, val); +vuint16m2x3_t test_vset_v_u16m2_u16m2x3(vuint16m2x3_t dest, size_t index, vuint16m2_t value) { + return __riscv_vset_v_u16m2_u16m2x3(dest, 0, value); } -vuint16m2x4_t test_vset_v_u16m2_u16m2x4(vuint16m2x4_t dest, size_t index, vuint16m2_t val) { - return __riscv_vset_v_u16m2_u16m2x4(dest, 0, val); +vuint16m2x4_t test_vset_v_u16m2_u16m2x4(vuint16m2x4_t dest, size_t index, vuint16m2_t value) { + return __riscv_vset_v_u16m2_u16m2x4(dest, 0, value); } -vuint16m4x2_t test_vset_v_u16m4_u16m4x2(vuint16m4x2_t dest, size_t index, vuint16m4_t val) { - return __riscv_vset_v_u16m4_u16m4x2(dest, 0, val); +vuint16m4x2_t test_vset_v_u16m4_u16m4x2(vuint16m4x2_t dest, size_t index, vuint16m4_t value) { + return __riscv_vset_v_u16m4_u16m4x2(dest, 0, value); } -vuint32mf2x2_t test_vset_v_u32mf2_u32mf2x2(vuint32mf2x2_t dest, size_t index, vuint32mf2_t val) { - return __riscv_vset_v_u32mf2_u32mf2x2(dest, 0, val); +vuint32mf2x2_t test_vset_v_u32mf2_u32mf2x2(vuint32mf2x2_t dest, size_t index, vuint32mf2_t value) { + return __riscv_vset_v_u32mf2_u32mf2x2(dest, 0, value); } -vuint32mf2x3_t 
test_vset_v_u32mf2_u32mf2x3(vuint32mf2x3_t dest, size_t index, vuint32mf2_t val) { - return __riscv_vset_v_u32mf2_u32mf2x3(dest, 0, val); +vuint32mf2x3_t test_vset_v_u32mf2_u32mf2x3(vuint32mf2x3_t dest, size_t index, vuint32mf2_t value) { + return __riscv_vset_v_u32mf2_u32mf2x3(dest, 0, value); } -vuint32mf2x4_t test_vset_v_u32mf2_u32mf2x4(vuint32mf2x4_t dest, size_t index, vuint32mf2_t val) { - return __riscv_vset_v_u32mf2_u32mf2x4(dest, 0, val); +vuint32mf2x4_t test_vset_v_u32mf2_u32mf2x4(vuint32mf2x4_t dest, size_t index, vuint32mf2_t value) { + return __riscv_vset_v_u32mf2_u32mf2x4(dest, 0, value); } -vuint32mf2x5_t test_vset_v_u32mf2_u32mf2x5(vuint32mf2x5_t dest, size_t index, vuint32mf2_t val) { - return __riscv_vset_v_u32mf2_u32mf2x5(dest, 0, val); +vuint32mf2x5_t test_vset_v_u32mf2_u32mf2x5(vuint32mf2x5_t dest, size_t index, vuint32mf2_t value) { + return __riscv_vset_v_u32mf2_u32mf2x5(dest, 0, value); } -vuint32mf2x6_t test_vset_v_u32mf2_u32mf2x6(vuint32mf2x6_t dest, size_t index, vuint32mf2_t val) { - return __riscv_vset_v_u32mf2_u32mf2x6(dest, 0, val); +vuint32mf2x6_t test_vset_v_u32mf2_u32mf2x6(vuint32mf2x6_t dest, size_t index, vuint32mf2_t value) { + return __riscv_vset_v_u32mf2_u32mf2x6(dest, 0, value); } -vuint32mf2x7_t test_vset_v_u32mf2_u32mf2x7(vuint32mf2x7_t dest, size_t index, vuint32mf2_t val) { - return __riscv_vset_v_u32mf2_u32mf2x7(dest, 0, val); +vuint32mf2x7_t test_vset_v_u32mf2_u32mf2x7(vuint32mf2x7_t dest, size_t index, vuint32mf2_t value) { + return __riscv_vset_v_u32mf2_u32mf2x7(dest, 0, value); } -vuint32mf2x8_t test_vset_v_u32mf2_u32mf2x8(vuint32mf2x8_t dest, size_t index, vuint32mf2_t val) { - return __riscv_vset_v_u32mf2_u32mf2x8(dest, 0, val); +vuint32mf2x8_t test_vset_v_u32mf2_u32mf2x8(vuint32mf2x8_t dest, size_t index, vuint32mf2_t value) { + return __riscv_vset_v_u32mf2_u32mf2x8(dest, 0, value); } -vuint32m1x2_t test_vset_v_u32m1_u32m1x2(vuint32m1x2_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset_v_u32m1_u32m1x2(dest, 0, val); +vuint32m1x2_t test_vset_v_u32m1_u32m1x2(vuint32m1x2_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset_v_u32m1_u32m1x2(dest, 0, value); } -vuint32m1x3_t test_vset_v_u32m1_u32m1x3(vuint32m1x3_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset_v_u32m1_u32m1x3(dest, 0, val); +vuint32m1x3_t test_vset_v_u32m1_u32m1x3(vuint32m1x3_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset_v_u32m1_u32m1x3(dest, 0, value); } -vuint32m1x4_t test_vset_v_u32m1_u32m1x4(vuint32m1x4_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset_v_u32m1_u32m1x4(dest, 0, val); +vuint32m1x4_t test_vset_v_u32m1_u32m1x4(vuint32m1x4_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset_v_u32m1_u32m1x4(dest, 0, value); } -vuint32m1x5_t test_vset_v_u32m1_u32m1x5(vuint32m1x5_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset_v_u32m1_u32m1x5(dest, 0, val); +vuint32m1x5_t test_vset_v_u32m1_u32m1x5(vuint32m1x5_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset_v_u32m1_u32m1x5(dest, 0, value); } -vuint32m1x6_t test_vset_v_u32m1_u32m1x6(vuint32m1x6_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset_v_u32m1_u32m1x6(dest, 0, val); +vuint32m1x6_t test_vset_v_u32m1_u32m1x6(vuint32m1x6_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset_v_u32m1_u32m1x6(dest, 0, value); } -vuint32m1x7_t test_vset_v_u32m1_u32m1x7(vuint32m1x7_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset_v_u32m1_u32m1x7(dest, 0, val); +vuint32m1x7_t 
test_vset_v_u32m1_u32m1x7(vuint32m1x7_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset_v_u32m1_u32m1x7(dest, 0, value); } -vuint32m1x8_t test_vset_v_u32m1_u32m1x8(vuint32m1x8_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset_v_u32m1_u32m1x8(dest, 0, val); +vuint32m1x8_t test_vset_v_u32m1_u32m1x8(vuint32m1x8_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset_v_u32m1_u32m1x8(dest, 0, value); } -vuint32m2x2_t test_vset_v_u32m2_u32m2x2(vuint32m2x2_t dest, size_t index, vuint32m2_t val) { - return __riscv_vset_v_u32m2_u32m2x2(dest, 0, val); +vuint32m2x2_t test_vset_v_u32m2_u32m2x2(vuint32m2x2_t dest, size_t index, vuint32m2_t value) { + return __riscv_vset_v_u32m2_u32m2x2(dest, 0, value); } -vuint32m2x3_t test_vset_v_u32m2_u32m2x3(vuint32m2x3_t dest, size_t index, vuint32m2_t val) { - return __riscv_vset_v_u32m2_u32m2x3(dest, 0, val); +vuint32m2x3_t test_vset_v_u32m2_u32m2x3(vuint32m2x3_t dest, size_t index, vuint32m2_t value) { + return __riscv_vset_v_u32m2_u32m2x3(dest, 0, value); } -vuint32m2x4_t test_vset_v_u32m2_u32m2x4(vuint32m2x4_t dest, size_t index, vuint32m2_t val) { - return __riscv_vset_v_u32m2_u32m2x4(dest, 0, val); +vuint32m2x4_t test_vset_v_u32m2_u32m2x4(vuint32m2x4_t dest, size_t index, vuint32m2_t value) { + return __riscv_vset_v_u32m2_u32m2x4(dest, 0, value); } -vuint32m4x2_t test_vset_v_u32m4_u32m4x2(vuint32m4x2_t dest, size_t index, vuint32m4_t val) { - return __riscv_vset_v_u32m4_u32m4x2(dest, 0, val); +vuint32m4x2_t test_vset_v_u32m4_u32m4x2(vuint32m4x2_t dest, size_t index, vuint32m4_t value) { + return __riscv_vset_v_u32m4_u32m4x2(dest, 0, value); } -vuint64m1x2_t test_vset_v_u64m1_u64m1x2(vuint64m1x2_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset_v_u64m1_u64m1x2(dest, 0, val); +vuint64m1x2_t test_vset_v_u64m1_u64m1x2(vuint64m1x2_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset_v_u64m1_u64m1x2(dest, 0, value); } -vuint64m1x3_t test_vset_v_u64m1_u64m1x3(vuint64m1x3_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset_v_u64m1_u64m1x3(dest, 0, val); +vuint64m1x3_t test_vset_v_u64m1_u64m1x3(vuint64m1x3_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset_v_u64m1_u64m1x3(dest, 0, value); } -vuint64m1x4_t test_vset_v_u64m1_u64m1x4(vuint64m1x4_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset_v_u64m1_u64m1x4(dest, 0, val); +vuint64m1x4_t test_vset_v_u64m1_u64m1x4(vuint64m1x4_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset_v_u64m1_u64m1x4(dest, 0, value); } -vuint64m1x5_t test_vset_v_u64m1_u64m1x5(vuint64m1x5_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset_v_u64m1_u64m1x5(dest, 0, val); +vuint64m1x5_t test_vset_v_u64m1_u64m1x5(vuint64m1x5_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset_v_u64m1_u64m1x5(dest, 0, value); } -vuint64m1x6_t test_vset_v_u64m1_u64m1x6(vuint64m1x6_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset_v_u64m1_u64m1x6(dest, 0, val); +vuint64m1x6_t test_vset_v_u64m1_u64m1x6(vuint64m1x6_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset_v_u64m1_u64m1x6(dest, 0, value); } -vuint64m1x7_t test_vset_v_u64m1_u64m1x7(vuint64m1x7_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset_v_u64m1_u64m1x7(dest, 0, val); +vuint64m1x7_t test_vset_v_u64m1_u64m1x7(vuint64m1x7_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset_v_u64m1_u64m1x7(dest, 0, value); } -vuint64m1x8_t test_vset_v_u64m1_u64m1x8(vuint64m1x8_t dest, size_t index, vuint64m1_t val) { - return 
__riscv_vset_v_u64m1_u64m1x8(dest, 0, val); +vuint64m1x8_t test_vset_v_u64m1_u64m1x8(vuint64m1x8_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset_v_u64m1_u64m1x8(dest, 0, value); } -vuint64m2x2_t test_vset_v_u64m2_u64m2x2(vuint64m2x2_t dest, size_t index, vuint64m2_t val) { - return __riscv_vset_v_u64m2_u64m2x2(dest, 0, val); +vuint64m2x2_t test_vset_v_u64m2_u64m2x2(vuint64m2x2_t dest, size_t index, vuint64m2_t value) { + return __riscv_vset_v_u64m2_u64m2x2(dest, 0, value); } -vuint64m2x3_t test_vset_v_u64m2_u64m2x3(vuint64m2x3_t dest, size_t index, vuint64m2_t val) { - return __riscv_vset_v_u64m2_u64m2x3(dest, 0, val); +vuint64m2x3_t test_vset_v_u64m2_u64m2x3(vuint64m2x3_t dest, size_t index, vuint64m2_t value) { + return __riscv_vset_v_u64m2_u64m2x3(dest, 0, value); } -vuint64m2x4_t test_vset_v_u64m2_u64m2x4(vuint64m2x4_t dest, size_t index, vuint64m2_t val) { - return __riscv_vset_v_u64m2_u64m2x4(dest, 0, val); +vuint64m2x4_t test_vset_v_u64m2_u64m2x4(vuint64m2x4_t dest, size_t index, vuint64m2_t value) { + return __riscv_vset_v_u64m2_u64m2x4(dest, 0, value); } -vuint64m4x2_t test_vset_v_u64m4_u64m4x2(vuint64m4x2_t dest, size_t index, vuint64m4_t val) { - return __riscv_vset_v_u64m4_u64m4x2(dest, 0, val); +vuint64m4x2_t test_vset_v_u64m4_u64m4x2(vuint64m4x2_t dest, size_t index, vuint64m4_t value) { + return __riscv_vset_v_u64m4_u64m4x2(dest, 0, value); } /* { dg-final { scan-assembler-times {vl[1248]re[0-9]*\.v\s+v[1248],0\([a-z0-9]*\)\s+vl[1248]re[0-9]*\.v\s+v[1248],0\([a-z0-9]*\)+[ivxfswum.]*\s+} 66 } } */ diff --git a/auto-generated/gnu-api-tests/vsext_vf2.c b/auto-generated/gnu-api-tests/vsext_vf2.c index 1a3ff7e7f..e4c1adbc2 100644 --- a/auto-generated/gnu-api-tests/vsext_vf2.c +++ b/auto-generated/gnu-api-tests/vsext_vf2.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf2_i16mf4(op1, vl); +vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16mf4(vs2, vl); } -vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf2_i16mf2(op1, vl); +vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16mf2(vs2, vl); } -vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m1(op1, vl); +vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m1(vs2, vl); } -vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m2(op1, vl); +vint16m2_t test_vsext_vf2_i16m2(vint8m1_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m2(vs2, vl); } -vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m4(op1, vl); +vint16m4_t test_vsext_vf2_i16m4(vint8m2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m4(vs2, vl); } -vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m8(op1, vl); +vint16m8_t test_vsext_vf2_i16m8(vint8m4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m8(vs2, vl); } -vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf2_i32mf2(op1, vl); +vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32mf2(vs2, vl); } -vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m1(op1, vl); +vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t vs2, size_t vl) { + return 
__riscv_vsext_vf2_i32m1(vs2, vl); } -vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m2(op1, vl); +vint32m2_t test_vsext_vf2_i32m2(vint16m1_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32m2(vs2, vl); } -vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m4(op1, vl); +vint32m4_t test_vsext_vf2_i32m4(vint16m2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32m4(vs2, vl); } -vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m8(op1, vl); +vint32m8_t test_vsext_vf2_i32m8(vint16m4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32m8(vs2, vl); } -vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m1(op1, vl); +vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m1(vs2, vl); } -vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m2(op1, vl); +vint64m2_t test_vsext_vf2_i64m2(vint32m1_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m2(vs2, vl); } -vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m4(op1, vl); +vint64m4_t test_vsext_vf2_i64m4(vint32m2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m4(vs2, vl); } -vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m8(op1, vl); +vint64m8_t test_vsext_vf2_i64m8(vint32m4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m8(vs2, vl); } -vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf2_i16mf4_m(mask, op1, vl); +vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16mf4_m(vm, vs2, vl); } -vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf2_i16mf2_m(mask, op1, vl); +vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16mf2_m(vm, vs2, vl); } -vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m1_m(mask, op1, vl); +vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m1_m(vm, vs2, vl); } -vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m2_m(mask, op1, vl); +vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t vm, vint8m1_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m2_m(vm, vs2, vl); } -vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m4_m(mask, op1, vl); +vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t vm, vint8m2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m4_m(vm, vs2, vl); } -vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) { - return __riscv_vsext_vf2_i16m8_m(mask, op1, vl); +vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t vm, vint8m4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i16m8_m(vm, vs2, vl); } -vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf2_i32mf2_m(mask, op1, vl); +vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32mf2_m(vm, vs2, vl); } -vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m1_m(mask, op1, vl); +vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t vm, vint16mf2_t vs2, size_t 
vl) { + return __riscv_vsext_vf2_i32m1_m(vm, vs2, vl); } -vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m2_m(mask, op1, vl); +vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32m2_m(vm, vs2, vl); } -vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m4_m(mask, op1, vl); +vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32m4_m(vm, vs2, vl); } -vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) { - return __riscv_vsext_vf2_i32m8_m(mask, op1, vl); +vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t vm, vint16m4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i32m8_m(vm, vs2, vl); } -vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m1_m(mask, op1, vl); +vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m1_m(vm, vs2, vl); } -vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint32m1_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m2_m(mask, op1, vl); +vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m2_m(vm, vs2, vl); } -vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m4_m(mask, op1, vl); +vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m4_m(vm, vs2, vl); } -vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint32m4_t op1, size_t vl) { - return __riscv_vsext_vf2_i64m8_m(mask, op1, vl); +vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vsext_vf2_i64m8_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf2[ivxfswum.]*\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vsext_vf4.c b/auto-generated/gnu-api-tests/vsext_vf4.c index 7e2a5f167..2ae7561ba 100644 --- a/auto-generated/gnu-api-tests/vsext_vf4.c +++ b/auto-generated/gnu-api-tests/vsext_vf4.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2(op1, vl); +vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32mf2(vs2, vl); } -vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1(op1, vl); +vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m1(vs2, vl); } -vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2(op1, vl); +vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m2(vs2, vl); } -vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4(op1, vl); +vint32m4_t test_vsext_vf4_i32m4(vint8m1_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m4(vs2, vl); } -vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m8(op1, vl); +vint32m8_t test_vsext_vf4_i32m8(vint8m2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m8(vs2, vl); } -vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1(op1, vl); +vint64m1_t 
test_vsext_vf4_i64m1(vint16mf4_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m1(vs2, vl); } -vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2(op1, vl); +vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m2(vs2, vl); } -vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4(op1, vl); +vint64m4_t test_vsext_vf4_i64m4(vint16m1_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m4(vs2, vl); } -vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8(op1, vl); +vint64m8_t test_vsext_vf4_i64m8(vint16m2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m8(vs2, vl); } -vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2_m(mask, op1, vl); +vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32mf2_m(vm, vs2, vl); } -vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1_m(mask, op1, vl); +vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m1_m(vm, vs2, vl); } -vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2_m(mask, op1, vl); +vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m2_m(vm, vs2, vl); } -vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4_m(mask, op1, vl); +vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t vm, vint8m1_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m4_m(vm, vs2, vl); } -vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m8_m(mask, op1, vl); +vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t vm, vint8m2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i32m8_m(vm, vs2, vl); } -vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1_m(mask, op1, vl); +vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m1_m(vm, vs2, vl); } -vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2_m(mask, op1, vl); +vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m2_m(vm, vs2, vl); } -vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4_m(mask, op1, vl); +vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m4_m(vm, vs2, vl); } -vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8_m(mask, op1, vl); +vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vsext_vf4_i64m8_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf4[ivxfswum.]*\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vsext_vf8.c b/auto-generated/gnu-api-tests/vsext_vf8.c index f9a8991bb..ab4fb8651 100644 --- a/auto-generated/gnu-api-tests/vsext_vf8.c +++ b/auto-generated/gnu-api-tests/vsext_vf8.c @@ -6,36 +6,36 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; 
-vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1(op1, vl); +vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m1(vs2, vl); } -vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2(op1, vl); +vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m2(vs2, vl); } -vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4(op1, vl); +vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m4(vs2, vl); } -vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8(op1, vl); +vint64m8_t test_vsext_vf8_i64m8(vint8m1_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m8(vs2, vl); } -vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1_m(mask, op1, vl); +vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m1_m(vm, vs2, vl); } -vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2_m(mask, op1, vl); +vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m2_m(vm, vs2, vl); } -vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4_m(mask, op1, vl); +vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m4_m(vm, vs2, vl); } -vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8_m(mask, op1, vl); +vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t vm, vint8m1_t vs2, size_t vl) { + return __riscv_vsext_vf8_i64m8_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf8[ivxfswum.]*\s+} 8 } } */ diff --git a/auto-generated/gnu-api-tests/vslide1down.c b/auto-generated/gnu-api-tests/vslide1down.c index cb1d3dfa4..44cf7c526 100644 --- a/auto-generated/gnu-api-tests/vslide1down.c +++ b/auto-generated/gnu-api-tests/vslide1down.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf8(src, value, vl); +vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf4(src, value, vl); +vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf2(src, value, vl); +vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m1(src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { - return 
__riscv_vslide1down_vx_i8m2(src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m4(src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m8(src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf4(src, value, vl); +vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf2(src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m1(src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m2(src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m4(src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m8(src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32mf2(src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m1(src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m2(src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m4(src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { - return 
__riscv_vslide1down_vx_i32m8(src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m1(src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m2(src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m4(src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m8(src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf8(src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf4(src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf2(src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m1(src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m2(src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m4(src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m8(src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16mf4(src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) { - return 
__riscv_vslide1down_vx_u16mf2(src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m1(src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m2(src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m4(src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m8(src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32mf2(src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m1(src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m2(src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m4(src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m8(src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m1(src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m2(src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m4(src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t 
test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m8(src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m8(vs2, rs1, vl); } -vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf8_m(mask, src, value, vl); +vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf4_m(mask, src, value, vl); +vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf2_m(mask, src, value, vl); +vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m1_m(mask, src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m2_m(mask, src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m4_m(mask, src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m8_m(mask, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf4_m(mask, src, value, vl); +vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf2_m(mask, src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m1_m(mask, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { - return 
__riscv_vslide1down_vx_i16m2_m(mask, src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m4_m(mask, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m8_m(mask, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32mf2_m(mask, src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m1_m(mask, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m2_m(mask, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m4_m(mask, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m8_m(mask, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m1_m(mask, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m2_m(mask, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m4_m(mask, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m8_m(mask, 
src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m8_m(vm, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf8_m(mask, src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf4_m(mask, src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf2_m(mask, src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m1_m(mask, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m2_m(mask, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m4_m(mask, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m8_m(mask, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16mf4_m(mask, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16mf2_m(mask, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m1_m(mask, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m2_m(mask, src, value, 
vl); +vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m4_m(mask, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m8_m(mask, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32mf2_m(mask, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m1_m(mask, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m2_m(mask, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m4_m(mask, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m8_m(mask, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m1_m(mask, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m2_m(mask, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m4_m(mask, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return 
__riscv_vslide1down_vx_u64m8_m(mask, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1down\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vslide1up.c b/auto-generated/gnu-api-tests/vslide1up.c index 9de2da40b..4df7e8a12 100644 --- a/auto-generated/gnu-api-tests/vslide1up.c +++ b/auto-generated/gnu-api-tests/vslide1up.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf8(src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf4(src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf2(src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m1(src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m2(src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m4(src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m8(src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16mf4(src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16mf2(src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m1(src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m2(src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return 
__riscv_vslide1up_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m4(src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m8(src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32mf2(src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m1(src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m2(src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m4(src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m8(src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m1(src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m2(src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m4(src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m8(src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf8(src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf4(src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t 
value, size_t vl) { - return __riscv_vslide1up_vx_u8mf2(src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m1(src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m2(src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m4(src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m8(src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16mf4(src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16mf2(src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m1(src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m2(src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m4(src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m8(src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32mf2(src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m1(src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m2(src, 
value, vl); +vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m4(src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m8(src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m1(src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m2(src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m4(src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m8(src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m8(vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf8_m(mask, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf4_m(mask, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf2_m(mask, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m1_m(mask, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m2_m(mask, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m4_m(mask, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vslide1up_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m8_m(mask, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16mf4_m(mask, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16mf2_m(mask, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m1_m(mask, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m2_m(mask, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m4_m(mask, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m8_m(mask, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32mf2_m(mask, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m1_m(mask, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m2_m(mask, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m4_m(mask, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, 
int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m8_m(mask, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m1_m(mask, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m2_m(mask, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m4_m(mask, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m8_m(mask, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m8_m(vm, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf8_m(mask, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf4_m(mask, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf2_m(mask, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m1_m(mask, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m2_m(mask, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m4_m(mask, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m8_m(mask, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t 
vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16mf4_m(mask, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16mf2_m(mask, src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m1_m(mask, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m2_m(mask, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m4_m(mask, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m8_m(mask, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32mf2_m(mask, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m1_m(mask, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m2_m(mask, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m4_m(mask, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m8_m(mask, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) 
{ + return __riscv_vslide1up_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m1_m(mask, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m2_m(mask, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m4_m(mask, src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m8_m(mask, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1up\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vslidedown.c b/auto-generated/gnu-api-tests/vslidedown.c index 8c808de08..bd98d8bce 100644 --- a/auto-generated/gnu-api-tests/vslidedown.c +++ b/auto-generated/gnu-api-tests/vslidedown.c @@ -6,476 +6,476 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf4(src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf4(vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf2(src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf2(vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m1(src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m1(vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m2(src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m2(vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m4(src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m4(vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m8(src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m8(vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t 
offset, size_t vl) { - return __riscv_vslidedown_vx_f32mf2(src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32mf2(vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m1(src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m1(vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m2(src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m2(vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m4(src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m4(vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m8(src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m8(vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m1(src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m1(vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m2(src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m2(vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m4(src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m4(vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m8(src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m8(vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf8(src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf4(src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf2(src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m1(src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, 
size_t vl) { - return __riscv_vslidedown_vx_i8m2(src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m4(src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m8(src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf4(src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf2(src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m1(src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m2(src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m4(src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m8(src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32mf2(src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m1(src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m2(src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m4(src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m8(src, offset, vl); 
+vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m1(src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m2(src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m4(src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m8(src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf8(src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf4(src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf2(src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m1(src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m2(src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m4(src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m8(src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf4(src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf2(src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m1(src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m2(src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m4(src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m8(src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32mf2(src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m1(src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m2(src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m4(src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m8(src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m1(src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m2(src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m4(src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m8(src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m8(vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf4_m(mask, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf4_m(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf2_m(mask, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf2_m(vm, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m1_m(mask, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m1_m(vm, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m2_m(mask, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m2_m(vm, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m4_m(mask, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m4_m(vm, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m8_m(mask, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m8_m(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32mf2_m(mask, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32mf2_m(vm, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m1_m(mask, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m1_m(vm, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m2_m(mask, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m2_m(vm, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m4_m(mask, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m4_m(vm, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m8_m(mask, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t vm, 
vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m8_m(vm, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m1_m(mask, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m1_m(vm, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m2_m(mask, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m2_m(vm, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m4_m(mask, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m4_m(vm, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m8_m(mask, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m8_m(vm, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf8_m(mask, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf4_m(mask, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf2_m(mask, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m1_m(mask, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m2_m(mask, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m4_m(mask, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m8_m(mask, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m8_m(vm, 
vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf4_m(mask, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf2_m(mask, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m1_m(mask, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m2_m(mask, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m4_m(mask, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m8_m(mask, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32mf2_m(mask, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m1_m(mask, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m2_m(mask, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m4_m(mask, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m8_m(mask, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t 
test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m1_m(mask, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m2_m(mask, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m4_m(mask, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m8_m(mask, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m8_m(vm, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf8_m(mask, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf4_m(mask, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf2_m(mask, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m1_m(mask, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m2_m(mask, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m4_m(mask, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m8_m(mask, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) { 
- return __riscv_vslidedown_vx_u16mf4_m(mask, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf2_m(mask, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m1_m(mask, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m2_m(mask, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m4_m(mask, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m8_m(mask, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32mf2_m(mask, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m1_m(mask, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m2_m(mask, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m4_m(mask, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m8_m(mask, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) { - return 
__riscv_vslidedown_vx_u64m1_m(mask, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u64m1_m(vm, vs2, rs1, vl);
 }
-vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u64m2_m(mask, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u64m2_m(vm, vs2, rs1, vl);
 }
-vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u64m4_m(mask, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u64m4_m(vm, vs2, rs1, vl);
 }
-vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u64m8_m(mask, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslidedown\.[ivxfswum.]+\s+} 118 } } */
diff --git a/auto-generated/gnu-api-tests/vslideup.c b/auto-generated/gnu-api-tests/vslideup.c
index a7af1289d..237f485d4 100644
--- a/auto-generated/gnu-api-tests/vslideup.c
+++ b/auto-generated/gnu-api-tests/vslideup.c
@@ -6,476 +6,476 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf4(dest, src, offset, vl);
+vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf4(vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf2(dest, src, offset, vl);
+vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf2(vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m1(dest, src, offset, vl);
+vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m1(vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m2(dest, src, offset, vl);
+vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m2(vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m4(dest, src, offset, vl);
+vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m4(vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m8(dest, src, offset, vl);
+vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m8(vd, vs2, rs1, vl);
 }
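/* Usage sketch (hand-written, not part of the auto-generated tests):
   with the renamed operands above, vd supplies the elements below the
   slide offset and vs2 supplies the elements being slid up.  The helper
   name and the fixed offset of 1 are illustrative assumptions, not part
   of this patch.  */
#include <riscv_vector.h>

static inline vfloat32m1_t slideup_by_one(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
  /* Result element i+1 takes vs2[i]; element 0 keeps vd[0] (rs1 = 1).  */
  return __riscv_vslideup_vx_f32m1(vd, vs2, 1, vl);
}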
-vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32mf2(dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32mf2(vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m1(dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m1(vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m2(dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m2(vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m4(dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m4(vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m8(dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m8(vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m1(dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m1(vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m2(dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m2(vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m4(dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m4(vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m8(dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m8(vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf8(dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf8(vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf4(dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf4(vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return 
__riscv_vslideup_vx_i8mf2(dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf2(vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m1(dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m1(vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m2(dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m2(vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m4(dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m4(vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m8(dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m8(vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf4(dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf4(vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf2(dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf2(vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m1(dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m1(vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m2(dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m2(vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m4(dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m4(vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m8(dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m8(vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32mf2(dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32mf2(vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1(vint32m1_t 
dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m1(dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m1(vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m2(dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m2(vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m4(dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m4(vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m8(dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m8(vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m1(dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m1(vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m2(dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m2(vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m4(dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m4(vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m8(dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m8(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf8(dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf8(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf4(dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf4(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf2(dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf2(vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m1(dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vslideup_vx_u8m1(vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m2(dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m2(vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m4(dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m4(vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m8(dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m8(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16mf4(dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf4(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16mf2(dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf2(vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m1(dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m1(vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m2(dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m2(vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m4(dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m4(vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m8(dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m8(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32mf2(dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32mf2(vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m1(dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m1(vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return 
__riscv_vslideup_vx_u32m2(dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m2(vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m4(dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m4(vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m8(dest, src, offset, vl); +vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m8(vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m1(dest, src, offset, vl); +vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m1(vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m2(dest, src, offset, vl); +vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m2(vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m4(dest, src, offset, vl); +vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m4(vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m8(dest, src, offset, vl); +vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m8(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16mf4_m(mask, dest, src, offset, vl); +vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16mf4_m(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16mf2_m(mask, dest, src, offset, vl); +vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16mf2_m(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m1_m(mask, dest, src, offset, vl); +vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m1_m(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m2_m(mask, dest, src, offset, vl); +vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m2_m(vm, vd, vs2, rs1, vl); } -vfloat16m4_t 
test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m4_m(mask, dest, src, offset, vl); +vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m4_m(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m8_m(mask, dest, src, offset, vl); +vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m8_m(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32mf2_m(mask, dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32mf2_m(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m1_m(mask, dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m1_m(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m2_m(mask, dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m2_m(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m4_m(mask, dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m4_m(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m8_m(mask, dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m8_m(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m1_m(mask, dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m1_m(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m2_m(mask, dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m2_m(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m4_m(mask, dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vslideup_vx_f64m4_m(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m8_m(mask, dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m8_m(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf8_m(mask, dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf8_m(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf4_m(mask, dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf4_m(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf2_m(mask, dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf2_m(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m1_m(mask, dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m1_m(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m2_m(mask, dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m2_m(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m4_m(mask, dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m4_m(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m8_m(mask, dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m8_m(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf4_m(mask, dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf4_m(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf2_m(mask, dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf2_m(vm, vd, vs2, rs1, vl); } -vint16m1_t 
test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m1_m(mask, dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m1_m(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m2_m(mask, dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m2_m(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m4_m(mask, dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m4_m(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m8_m(mask, dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m8_m(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32mf2_m(mask, dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32mf2_m(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m1_m(mask, dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m1_m(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m2_m(mask, dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m2_m(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m4_m(mask, dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m4_m(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m8_m(mask, dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m8_m(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m1_m(mask, dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m1_m(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t 
dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m2_m(mask, dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m2_m(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m4_m(mask, dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m4_m(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m8_m(mask, dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m8_m(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf8_m(mask, dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf8_m(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf4_m(mask, dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf4_m(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf2_m(mask, dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf2_m(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m1_m(mask, dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m1_m(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m2_m(mask, dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m2_m(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m4_m(mask, dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m4_m(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m8_m(mask, dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m8_m(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - 
return __riscv_vslideup_vx_u16mf4_m(mask, dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf4_m(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16mf2_m(mask, dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf2_m(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m1_m(mask, dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m1_m(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m2_m(mask, dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m2_m(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m4_m(mask, dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m4_m(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m8_m(mask, dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m8_m(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32mf2_m(mask, dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32mf2_m(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m1_m(mask, dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m1_m(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m2_m(mask, dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m2_m(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m4_m(mask, dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m4_m(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t 
offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m8_m(mask, dest, src, offset, vl);
+vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m8_m(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m1_m(mask, dest, src, offset, vl);
+vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m1_m(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m2_m(mask, dest, src, offset, vl);
+vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m2_m(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m4_m(mask, dest, src, offset, vl);
+vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m4_m(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m8_m(mask, dest, src, offset, vl);
+vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m8_m(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslideup\.[ivxfswum.]+\s+} 118 } } */
diff --git a/auto-generated/gnu-api-tests/vsll.c b/auto-generated/gnu-api-tests/vsll.c
index ed9efd97c..7dbb2e6b6 100644
--- a/auto-generated/gnu-api-tests/vsll.c
+++ b/auto-generated/gnu-api-tests/vsll.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf8(op1, shift, vl);
+vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf8(vs2, vs1, vl);
 }
-vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf8(op1, shift, vl);
+vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf8(vs2, rs1, vl);
 }
-vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf4(op1, shift, vl);
+vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf4(vs2, vs1, vl);
 }
-vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf4(op1, shift, vl);
+vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf4(vs2, rs1, vl);
 }
-vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf2(op1, shift, vl);
+vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf2(vs2, vs1, vl);
 }
-vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf2(op1, shift, vl);
+vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t vs2,
size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_vv_i8m1(op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m1(op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_vv_i8m2(op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m2(op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_vv_i8m4(op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m4(op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_vv_i8m8(op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m8(op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_vv_i16mf4(op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16mf4(op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_vv_i16mf2(op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16mf2(op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_vv_i16m1(op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m1(op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t 
shift, size_t vl) { - return __riscv_vsll_vv_i16m2(op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m2(op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_vv_i16m4(op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m4(op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_vv_i16m8(op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m8(op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_vv_i32mf2(op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32mf2(op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_vv_i32m1(op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m1(op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_vv_i32m2(op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m2(op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_vv_i32m4(op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m4(op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_vv_i32m8(op1, shift, vl); 
+vint32m8_t test_vsll_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m8(op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_vv_i64m1(op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m1(op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_vv_i64m2(op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m2(op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_vv_i64m4(op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m4(op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_vv_i64m8(op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m8(op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m8(vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf8(op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf8(op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf4(op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf4(op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf2(op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { 
+ return __riscv_vsll_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf2(op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_vv_u8m1(op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m1(op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_vv_u8m2(op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m2(op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_vv_u8m4(op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m4(op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_vv_u8m8(op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m8(op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_vv_u16mf4(op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16mf4(op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u16mf2(op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16mf2(op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_vv_u16m1(op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t 
test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m1(op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_vv_u16m2(op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m2(op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_vv_u16m4(op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m4(op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_vv_u16m8(op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m8(op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u32mf2(op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32mf2(op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_vv_u32m1(op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m1(op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_vv_u32m2(op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m2(op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_vv_u32m4(op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t 
-  return __riscv_vsll_vx_u32m4(op1, shift, vl);
+vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m4(vs2, rs1, vl);
 }
-vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m8(op1, shift, vl);
+vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m8(vs2, vs1, vl);
 }
-vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m8(op1, shift, vl);
+vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m8(vs2, rs1, vl);
 }
-vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m1(op1, shift, vl);
+vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m1(vs2, vs1, vl);
 }
-vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m1(op1, shift, vl);
+vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m1(vs2, rs1, vl);
 }
-vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m2(op1, shift, vl);
+vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m2(vs2, vs1, vl);
 }
-vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m2(op1, shift, vl);
+vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m2(vs2, rs1, vl);
 }
-vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m4(op1, shift, vl);
+vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m4(vs2, vs1, vl);
 }
-vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m4(op1, shift, vl);
+vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m4(vs2, rs1, vl);
 }
-vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m8(op1, shift, vl);
+vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m8(vs2, vs1, vl);
 }
-vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m8(op1, shift, vl);
+vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m8(vs2, rs1, vl);
 }
-vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf8_m(mask, op1, shift, vl);
+vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf8_m(vm, vs2, vs1, vl);
 }
-vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf8_m(mask, op1, shift, vl);
+vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf8_m(vm, vs2, rs1, vl);
 }
-vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf4_m(mask, op1, shift, vl);
+vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf4_m(vm, vs2, vs1, vl);
 }
-vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf4_m(mask, op1, shift, vl);
+vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf4_m(vm, vs2, rs1, vl);
 }
-vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf2_m(mask, op1, shift, vl);
+vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf2_m(vm, vs2, vs1, vl);
 }
-vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf2_m(mask, op1, shift, vl);
+vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf2_m(vm, vs2, rs1, vl);
 }
-vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m1_m(mask, op1, shift, vl);
+vint8m1_t test_vsll_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m1_m(vm, vs2, vs1, vl);
 }
-vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m1_m(mask, op1, shift, vl);
+vint8m1_t test_vsll_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m1_m(vm, vs2, rs1, vl);
 }
-vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m2_m(mask, op1, shift, vl);
+vint8m2_t test_vsll_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m2_m(vm, vs2, vs1, vl);
 }
-vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m2_m(mask, op1, shift, vl);
+vint8m2_t test_vsll_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m2_m(vm, vs2, rs1, vl);
 }
-vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m4_m(mask, op1, shift, vl);
+vint8m4_t test_vsll_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m4_m(vm, vs2, vs1, vl);
 }
-vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m4_m(mask, op1, shift, vl);
+vint8m4_t test_vsll_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m4_m(vm, vs2, rs1, vl);
 }
-vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m8_m(mask, op1, shift, vl);
+vint8m8_t test_vsll_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m8_m(vm, vs2, vs1, vl);
 }
-vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m8_m(mask, op1, shift, vl);
+vint8m8_t test_vsll_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m8_m(vm, vs2, rs1, vl);
 }
-vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16mf4_m(mask, op1, shift, vl);
+vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16mf4_m(vm, vs2, vs1, vl);
 }
-vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16mf4_m(mask, op1, shift, vl);
+vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16mf4_m(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16mf2_m(mask, op1, shift, vl);
+vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16mf2_m(vm, vs2, vs1, vl);
 }
-vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16mf2_m(mask, op1, shift, vl);
+vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16mf2_m(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m1_m(mask, op1, shift, vl);
+vint16m1_t test_vsll_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m1_m(vm, vs2, vs1, vl);
 }
-vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m1_m(mask, op1, shift, vl);
+vint16m1_t test_vsll_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m1_m(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m2_m(mask, op1, shift, vl);
+vint16m2_t test_vsll_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m2_m(vm, vs2, vs1, vl);
 }
-vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m2_m(mask, op1, shift, vl);
+vint16m2_t test_vsll_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m2_m(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m4_m(mask, op1, shift, vl);
+vint16m4_t test_vsll_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m4_m(vm, vs2, vs1, vl);
 }
-vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m4_m(mask, op1, shift, vl);
+vint16m4_t test_vsll_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m4_m(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m8_m(mask, op1, shift, vl);
+vint16m8_t test_vsll_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m8_m(vm, vs2, vs1, vl);
 }
-vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m8_m(mask, op1, shift, vl);
+vint16m8_t test_vsll_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m8_m(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32mf2_m(mask, op1, shift, vl);
+vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32mf2_m(vm, vs2, vs1, vl);
 }
-vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32mf2_m(mask, op1, shift, vl);
+vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32mf2_m(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m1_m(mask, op1, shift, vl);
+vint32m1_t test_vsll_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m1_m(vm, vs2, vs1, vl);
 }
-vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m1_m(mask, op1, shift, vl);
+vint32m1_t test_vsll_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m1_m(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m2_m(mask, op1, shift, vl);
+vint32m2_t test_vsll_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m2_m(vm, vs2, vs1, vl);
 }
-vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m2_m(mask, op1, shift, vl);
+vint32m2_t test_vsll_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m2_m(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m4_m(mask, op1, shift, vl);
+vint32m4_t test_vsll_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m4_m(vm, vs2, vs1, vl);
 }
-vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m4_m(mask, op1, shift, vl);
+vint32m4_t test_vsll_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m8_m(mask, op1, shift, vl);
+vint32m8_t test_vsll_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m8_m(mask, op1, shift, vl);
+vint32m8_t test_vsll_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m1_m(mask, op1, shift, vl);
+vint64m1_t test_vsll_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m1_m(mask, op1, shift, vl);
+vint64m1_t test_vsll_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m2_m(mask, op1, shift, vl);
+vint64m2_t test_vsll_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m2_m(mask, op1, shift, vl);
+vint64m2_t test_vsll_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m4_m(mask, op1, shift, vl);
+vint64m4_t test_vsll_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m4_m(mask, op1, shift, vl);
+vint64m4_t test_vsll_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m8_m(mask, op1, shift, vl);
+vint64m8_t test_vsll_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m8_m(mask, op1, shift, vl);
+vint64m8_t test_vsll_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m8_m(vm, vs2, rs1, vl);
 }
-vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf8_m(mask, op1, shift, vl);
+vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf8_m(vm, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf8_m(mask, op1, shift, vl);
+vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf8_m(vm, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf4_m(mask, op1, shift, vl);
+vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf4_m(vm, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf4_m(mask, op1, shift, vl);
+vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf4_m(vm, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf2_m(mask, op1, shift, vl);
+vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf2_m(vm, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf2_m(mask, op1, shift, vl);
+vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf2_m(vm, vs2, rs1, vl);
 }
-vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m1_m(mask, op1, shift, vl);
+vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m1_m(vm, vs2, vs1, vl);
 }
-vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m1_m(mask, op1, shift, vl);
+vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m1_m(vm, vs2, rs1, vl);
 }
-vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m2_m(mask, op1, shift, vl);
+vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m2_m(vm, vs2, vs1, vl);
 }
-vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m2_m(mask, op1, shift, vl);
+vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m2_m(vm, vs2, rs1, vl);
 }
-vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m4_m(mask, op1, shift, vl);
+vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m4_m(vm, vs2, vs1, vl);
 }
-vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m4_m(mask, op1, shift, vl);
+vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m4_m(vm, vs2, rs1, vl);
 }
-vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m8_m(mask, op1, shift, vl);
+vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m8_m(vm, vs2, vs1, vl);
 }
-vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m8_m(mask, op1, shift, vl);
+vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m8_m(vm, vs2, rs1, vl);
 }
-vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16mf4_m(mask, op1, shift, vl);
+vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16mf4_m(vm, vs2, vs1, vl);
 }
-vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16mf4_m(mask, op1, shift, vl);
+vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16mf4_m(vm, vs2, rs1, vl);
 }
-vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16mf2_m(mask, op1, shift, vl);
+vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16mf2_m(vm, vs2, vs1, vl);
 }
-vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16mf2_m(mask, op1, shift, vl);
+vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16mf2_m(vm, vs2, rs1, vl);
 }
-vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m1_m(mask, op1, shift, vl);
+vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m1_m(vm, vs2, vs1, vl);
 }
-vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m1_m(mask, op1, shift, vl);
+vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m1_m(vm, vs2, rs1, vl);
 }
-vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m2_m(mask, op1, shift, vl);
+vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m2_m(vm, vs2, vs1, vl);
 }
-vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m2_m(mask, op1, shift, vl);
+vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m2_m(vm, vs2, rs1, vl);
 }
-vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m4_m(mask, op1, shift, vl);
+vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m4_m(vm, vs2, vs1, vl);
 }
-vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m4_m(mask, op1, shift, vl);
+vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m4_m(vm, vs2, rs1, vl);
 }
-vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m8_m(mask, op1, shift, vl);
+vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m8_m(vm, vs2, vs1, vl);
 }
-vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m8_m(mask, op1, shift, vl);
+vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m8_m(vm, vs2, rs1, vl);
 }
-vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32mf2_m(mask, op1, shift, vl);
+vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32mf2_m(vm, vs2, vs1, vl);
 }
-vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32mf2_m(mask, op1, shift, vl);
+vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32mf2_m(vm, vs2, rs1, vl);
 }
-vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m1_m(mask, op1, shift, vl);
+vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m1_m(vm, vs2, vs1, vl);
 }
-vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m1_m(mask, op1, shift, vl);
+vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m1_m(vm, vs2, rs1, vl);
 }
-vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m2_m(mask, op1, shift, vl);
+vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m2_m(vm, vs2, vs1, vl);
 }
-vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m2_m(mask, op1, shift, vl);
+vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m2_m(vm, vs2, rs1, vl);
 }
-vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m4_m(mask, op1, shift, vl);
+vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m4_m(vm, vs2, vs1, vl);
 }
-vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m4_m(mask, op1, shift, vl);
+vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m4_m(vm, vs2, rs1, vl);
 }
-vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m8_m(mask, op1, shift, vl);
+vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m8_m(vm, vs2, vs1, vl);
 }
-vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m8_m(mask, op1, shift, vl);
+vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m8_m(vm, vs2, rs1, vl);
 }
-vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m1_m(mask, op1, shift, vl);
+vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m1_m(vm, vs2, vs1, vl);
 }
-vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m1_m(mask, op1, shift, vl);
+vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m1_m(vm, vs2, rs1, vl);
 }
-vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m2_m(mask, op1, shift, vl);
+vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m2_m(vm, vs2, vs1, vl);
 }
-vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m2_m(mask, op1, shift, vl);
+vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m2_m(vm, vs2, rs1, vl);
 }
-vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m4_m(mask, op1, shift, vl);
+vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m4_m(vm, vs2, vs1, vl);
 }
-vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m4_m(mask, op1, shift, vl);
+vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m4_m(vm, vs2, rs1, vl);
 }
-vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m8_m(mask, op1, shift, vl);
+vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m8_m(vm, vs2, vs1, vl);
 }
-vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m8_m(mask, op1, shift, vl);
+vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsll\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vsm.c b/auto-generated/gnu-api-tests/vsm.c
index 30a942cbd..5632f09fa 100644
--- a/auto-generated/gnu-api-tests/vsm.c
+++ b/auto-generated/gnu-api-tests/vsm.c
@@ -6,32 +6,32 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
-  return __riscv_vsm_v_b1(base, value, vl);
+void test_vsm_v_b1(uint8_t *rs1, vbool1_t vs3, size_t vl) {
+  return __riscv_vsm_v_b1(rs1, vs3, vl);
 }
-void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
-  return __riscv_vsm_v_b2(base, value, vl);
+void test_vsm_v_b2(uint8_t *rs1, vbool2_t vs3, size_t vl) {
+  return __riscv_vsm_v_b2(rs1, vs3, vl);
 }
-void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
-  return __riscv_vsm_v_b4(base, value, vl);
+void test_vsm_v_b4(uint8_t *rs1, vbool4_t vs3, size_t vl) {
+  return __riscv_vsm_v_b4(rs1, vs3, vl);
 }
-void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
-  return __riscv_vsm_v_b8(base, value, vl);
+void test_vsm_v_b8(uint8_t *rs1, vbool8_t vs3, size_t vl) {
+  return __riscv_vsm_v_b8(rs1, vs3, vl);
 }
-void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
-  return __riscv_vsm_v_b16(base, value, vl);
+void test_vsm_v_b16(uint8_t *rs1, vbool16_t vs3, size_t vl) {
+  return __riscv_vsm_v_b16(rs1, vs3, vl);
 }
-void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
-  return __riscv_vsm_v_b32(base, value, vl);
+void test_vsm_v_b32(uint8_t *rs1, vbool32_t vs3, size_t vl) {
+  return __riscv_vsm_v_b32(rs1, vs3, vl);
 }
-void test_vsm_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
-  return __riscv_vsm_v_b64(base, value, vl);
+void test_vsm_v_b64(uint8_t *rs1, vbool64_t vs3, size_t vl) {
+  return __riscv_vsm_v_b64(rs1, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsm\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/gnu-api-tests/vsmul.c b/auto-generated/gnu-api-tests/vsmul.c
index 4d7e328a9..c345a0de9 100644
--- a/auto-generated/gnu-api-tests/vsmul.c
+++ b/auto-generated/gnu-api-tests/vsmul.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vsmul_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vsmul_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vsmul_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vsmul_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vsmul_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vsmul_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vsmul_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vsmul_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16mf4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vsmul_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vsmul_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vsmul_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vsmul_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vsmul_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vsmul_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vsmul_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vsmul_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32mf2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vsmul_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vsmul_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vsmul_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vsmul_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vsmul_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vsmul_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vsmul_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vsmul_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vsmul_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vsmul_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vsmul_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vsmul_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vsmul_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vsmul_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16mf4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i16m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i16m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32mf2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i32m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i32m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsmul\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxei16.c b/auto-generated/gnu-api-tests/vsoxei16.c
index b2f1b436d..5bfded6cb 100644
--- a/auto-generated/gnu-api-tests/vsoxei16.c
+++ b/auto-generated/gnu-api-tests/vsoxei16.c
@@ -6,460 +6,460 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxei16_v_f16mf4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei16_v_f16mf4(base, bindex, value, vl);
+void test_vsoxei16_v_f16mf4(float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei16_v_f16mf4(rs1, rs2, vs3, vl);
 }
-void test_vsoxei16_v_f16mf2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei16_v_f16mf2(base, bindex, value, vl);
+void test_vsoxei16_v_f16mf2(float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei16_v_f16mf2(rs1, rs2, vs3, vl);
 }
-void test_vsoxei16_v_f16m1(float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei16_v_f16m1(base, bindex, value, vl);
+void test_vsoxei16_v_f16m1(float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei16_v_f16m1(rs1, rs2, vs3, vl);
 }
-void test_vsoxei16_v_f16m2(float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei16_v_f16m2(base, bindex, value, vl);
+void test_vsoxei16_v_f16m2(float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei16_v_f16m2(rs1, rs2, vs3, vl);
 }
-void test_vsoxei16_v_f16m4(float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsoxei16_v_f16m4(base, bindex, value, vl);
+void test_vsoxei16_v_f16m4(float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei16_v_f16m4(rs1, rs2, vs3, vl);
 }
-void test_vsoxei16_v_f16m8(float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsoxei16_v_f16m8(base, bindex, value, vl);
+void test_vsoxei16_v_f16m8(float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t vs3, size_t vl) {
rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f16m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32mf2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_f32mf2(base, bindex, value, vl); +void test_vsoxei16_v_f32mf2(float32_t *rs1, vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32mf2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m1(float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m1(base, bindex, value, vl); +void test_vsoxei16_v_f32m1(float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m2(float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m2(base, bindex, value, vl); +void test_vsoxei16_v_f32m2(float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m4(float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m4(base, bindex, value, vl); +void test_vsoxei16_v_f32m4(float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m8(float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m8(base, bindex, value, vl); +void test_vsoxei16_v_f32m8(float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m1(float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m1(base, bindex, value, vl); +void test_vsoxei16_v_f64m1(float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m2(float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m2(base, bindex, value, vl); +void test_vsoxei16_v_f64m2(float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m4(float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m4(base, bindex, value, vl); +void test_vsoxei16_v_f64m4(float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m8(float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m8(base, bindex, value, vl); +void test_vsoxei16_v_f64m8(float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsoxei16_v_i8mf8(base, bindex, value, vl); +void test_vsoxei16_v_i8mf8(int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8mf8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_i8mf4(base, bindex, value, vl); +void test_vsoxei16_v_i8mf4(int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8mf4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - 
return __riscv_vsoxei16_v_i8mf2(base, bindex, value, vl); +void test_vsoxei16_v_i8mf2(int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8mf2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i8m1(base, bindex, value, vl); +void test_vsoxei16_v_i8m1(int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsoxei16_v_i8m2(base, bindex, value, vl); +void test_vsoxei16_v_i8m2(int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i8m4(base, bindex, value, vl); +void test_vsoxei16_v_i8m4(int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_i16mf4(base, bindex, value, vl); +void test_vsoxei16_v_i16mf4(int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16mf4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_i16mf2(base, bindex, value, vl); +void test_vsoxei16_v_i16mf2(int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16mf2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m1(base, bindex, value, vl); +void test_vsoxei16_v_i16m1(int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m2(base, bindex, value, vl); +void test_vsoxei16_v_i16m2(int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m4(base, bindex, value, vl); +void test_vsoxei16_v_i16m4(int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m8(base, bindex, value, vl); +void test_vsoxei16_v_i16m8(int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_i32mf2(base, bindex, value, vl); +void test_vsoxei16_v_i32mf2(int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32mf2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i32m1(base, bindex, value, vl); +void test_vsoxei16_v_i32m1(int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t 
value, size_t vl) { - return __riscv_vsoxei16_v_i32m2(base, bindex, value, vl); +void test_vsoxei16_v_i32m2(int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i32m4(base, bindex, value, vl); +void test_vsoxei16_v_i32m4(int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsoxei16_v_i32m8(base, bindex, value, vl); +void test_vsoxei16_v_i32m8(int32_t *rs1, vuint16m4_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m1(base, bindex, value, vl); +void test_vsoxei16_v_i64m1(int64_t *rs1, vuint16mf4_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m2(base, bindex, value, vl); +void test_vsoxei16_v_i64m2(int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m4(base, bindex, value, vl); +void test_vsoxei16_v_i64m4(int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m8(base, bindex, value, vl); +void test_vsoxei16_v_i64m8(int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsoxei16_v_u8mf8(base, bindex, value, vl); +void test_vsoxei16_v_u8mf8(uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8mf8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_u8mf4(base, bindex, value, vl); +void test_vsoxei16_v_u8mf4(uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8mf4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_u8mf2(base, bindex, value, vl); +void test_vsoxei16_v_u8mf2(uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8mf2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u8m1(base, bindex, value, vl); +void test_vsoxei16_v_u8m1(uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u8m2(base, bindex, value, vl); +void test_vsoxei16_v_u8m2(uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m4(uint8_t *base, 
vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u8m4(base, bindex, value, vl); +void test_vsoxei16_v_u8m4(uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_u16mf4(base, bindex, value, vl); +void test_vsoxei16_v_u16mf4(uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16mf4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_u16mf2(base, bindex, value, vl); +void test_vsoxei16_v_u16mf2(uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16mf2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m1(base, bindex, value, vl); +void test_vsoxei16_v_u16m1(uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m2(base, bindex, value, vl); +void test_vsoxei16_v_u16m2(uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m4(base, bindex, value, vl); +void test_vsoxei16_v_u16m4(uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m8(base, bindex, value, vl); +void test_vsoxei16_v_u16m8(uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_u32mf2(base, bindex, value, vl); +void test_vsoxei16_v_u32mf2(uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32mf2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m1(base, bindex, value, vl); +void test_vsoxei16_v_u32m1(uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m2(base, bindex, value, vl); +void test_vsoxei16_v_u32m2(uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m4(base, bindex, value, vl); +void test_vsoxei16_v_u32m4(uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m8(base, bindex, value, vl); +void test_vsoxei16_v_u32m8(uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return 
__riscv_vsoxei16_v_u32m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m1(base, bindex, value, vl); +void test_vsoxei16_v_u64m1(uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m1(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m2(base, bindex, value, vl); +void test_vsoxei16_v_u64m2(uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m2(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m4(base, bindex, value, vl); +void test_vsoxei16_v_u64m4(uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m4(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m8(base, bindex, value, vl); +void test_vsoxei16_v_u64m8(uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m8(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_f16mf4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_f16mf2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m1_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsoxei16_v_f16m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsoxei16_v_f16m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m4_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsoxei16_v_f16m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m8_m(vbool2_t mask, float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsoxei16_v_f16m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_f32mf2_m(mask, 
base, bindex, value, vl); +void test_vsoxei16_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m1_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m4_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m8_m(vbool4_t mask, float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei16_v_f32m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m1_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m4_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m8_m(vbool8_t mask, float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei16_v_f64m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_f64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsoxei16_v_i8mf8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_i8mf4_m(mask, base, bindex, value, 
vl); +void test_vsoxei16_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_i8mf2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i8m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsoxei16_v_i8m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i8m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i8m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_i16mf4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_i16mf2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsoxei16_v_i16m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + 
return __riscv_vsoxei16_v_i16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_i32mf2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i32m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsoxei16_v_i32m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i32m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsoxei16_v_i32m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint16m4_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsoxei16_v_i64m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_i64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsoxei16_v_u8mf8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8mf8_m(vm, rs1, rs2, vs3, vl); } -void 
test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_u8mf4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_u8mf2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u8m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u8m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u8m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u8m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsoxei16_v_u16mf4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_u16mf2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, 
vuint16m8_t value, size_t vl) { - return __riscv_vsoxei16_v_u16m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsoxei16_v_u32mf2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsoxei16_v_u32m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsoxei16_v_u64m8_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_u64m8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei16\.[ivxfswum.]+\s+} 114 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxei32.c b/auto-generated/gnu-api-tests/vsoxei32.c index c27a4232e..a904c7a75 100644 --- a/auto-generated/gnu-api-tests/vsoxei32.c +++ b/auto-generated/gnu-api-tests/vsoxei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxei32_v_f16mf4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsoxei32_v_f16mf4(base, bindex, value, vl); +void test_vsoxei32_v_f16mf4(float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16mf4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16mf2(float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_f16mf2(base, bindex, value, vl); +void test_vsoxei32_v_f16mf2(float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m1(float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsoxei32_v_f16m1(base, bindex, value, vl); +void test_vsoxei32_v_f16m1(float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m2(float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsoxei32_v_f16m2(base, bindex, value, vl); +void test_vsoxei32_v_f16m2(float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m4(float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsoxei32_v_f16m4(base, bindex, value, vl); +void test_vsoxei32_v_f16m4(float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32mf2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_f32mf2(base, bindex, value, vl); +void test_vsoxei32_v_f32mf2(float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m1(float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m1(base, bindex, value, vl); +void test_vsoxei32_v_f32m1(float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m2(float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m2(base, bindex, value, vl); +void test_vsoxei32_v_f32m2(float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m4(float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m4(base, bindex, value, vl); +void test_vsoxei32_v_f32m4(float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m8(float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m8(base, bindex, value, vl); +void test_vsoxei32_v_f32m8(float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m8(rs1, rs2, vs3, vl); } -void 
test_vsoxei32_v_f64m1(float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m1(base, bindex, value, vl); +void test_vsoxei32_v_f64m1(float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m2(float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m2(base, bindex, value, vl); +void test_vsoxei32_v_f64m2(float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m4(float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m4(base, bindex, value, vl); +void test_vsoxei32_v_f64m4(float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m8(float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m8(base, bindex, value, vl); +void test_vsoxei32_v_f64m8(float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m8(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsoxei32_v_i8mf8(base, bindex, value, vl); +void test_vsoxei32_v_i8mf8(int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i8mf8(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsoxei32_v_i8mf4(base, bindex, value, vl); +void test_vsoxei32_v_i8mf4(int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i8mf4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_i8mf2(base, bindex, value, vl); +void test_vsoxei32_v_i8mf2(int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i8mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsoxei32_v_i8m1(base, bindex, value, vl); +void test_vsoxei32_v_i8m1(int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i8m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsoxei32_v_i8m2(base, bindex, value, vl); +void test_vsoxei32_v_i8m2(int8_t *rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i8m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsoxei32_v_i16mf4(base, bindex, value, vl); +void test_vsoxei32_v_i16mf4(int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i16mf4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_i16mf2(base, bindex, value, vl); +void test_vsoxei32_v_i16mf2(int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i16mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsoxei32_v_i16m1(base, bindex, value, vl); +void test_vsoxei32_v_i16m1(int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) { + return 
__riscv_vsoxei32_v_i16m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsoxei32_v_i16m2(base, bindex, value, vl); +void test_vsoxei32_v_i16m2(int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i16m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsoxei32_v_i16m4(base, bindex, value, vl); +void test_vsoxei32_v_i16m4(int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i16m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_i32mf2(base, bindex, value, vl); +void test_vsoxei32_v_i32mf2(int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i32mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsoxei32_v_i32m1(base, bindex, value, vl); +void test_vsoxei32_v_i32m1(int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i32m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsoxei32_v_i32m2(base, bindex, value, vl); +void test_vsoxei32_v_i32m2(int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i32m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei32_v_i32m4(base, bindex, value, vl); +void test_vsoxei32_v_i32m4(int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i32m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsoxei32_v_i32m8(base, bindex, value, vl); +void test_vsoxei32_v_i32m8(int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i32m8(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsoxei32_v_i64m1(base, bindex, value, vl); +void test_vsoxei32_v_i64m1(int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i64m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsoxei32_v_i64m2(base, bindex, value, vl); +void test_vsoxei32_v_i64m2(int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i64m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsoxei32_v_i64m4(base, bindex, value, vl); +void test_vsoxei32_v_i64m4(int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i64m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsoxei32_v_i64m8(base, bindex, value, vl); +void test_vsoxei32_v_i64m8(int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_i64m8(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsoxei32_v_u8mf8(base, bindex, value, vl); +void test_vsoxei32_v_u8mf8(uint8_t *rs1, vuint32mf2_t rs2, 
vuint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u8mf8(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsoxei32_v_u8mf4(base, bindex, value, vl); +void test_vsoxei32_v_u8mf4(uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u8mf4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_u8mf2(base, bindex, value, vl); +void test_vsoxei32_v_u8mf2(uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u8mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsoxei32_v_u8m1(base, bindex, value, vl); +void test_vsoxei32_v_u8m1(uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u8m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsoxei32_v_u8m2(base, bindex, value, vl); +void test_vsoxei32_v_u8m2(uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u8m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsoxei32_v_u16mf4(base, bindex, value, vl); +void test_vsoxei32_v_u16mf4(uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u16mf4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_u16mf2(base, bindex, value, vl); +void test_vsoxei32_v_u16mf2(uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u16mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsoxei32_v_u16m1(base, bindex, value, vl); +void test_vsoxei32_v_u16m1(uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u16m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsoxei32_v_u16m2(base, bindex, value, vl); +void test_vsoxei32_v_u16m2(uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u16m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsoxei32_v_u16m4(base, bindex, value, vl); +void test_vsoxei32_v_u16m4(uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u16m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_u32mf2(base, bindex, value, vl); +void test_vsoxei32_v_u32mf2(uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u32mf2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsoxei32_v_u32m1(base, bindex, value, vl); +void test_vsoxei32_v_u32m1(uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u32m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsoxei32_v_u32m2(base, bindex, 
value, vl); +void test_vsoxei32_v_u32m2(uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u32m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsoxei32_v_u32m4(base, bindex, value, vl); +void test_vsoxei32_v_u32m4(uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u32m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsoxei32_v_u32m8(base, bindex, value, vl); +void test_vsoxei32_v_u32m8(uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u32m8(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsoxei32_v_u64m1(base, bindex, value, vl); +void test_vsoxei32_v_u64m1(uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u64m1(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsoxei32_v_u64m2(base, bindex, value, vl); +void test_vsoxei32_v_u64m2(uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u64m2(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsoxei32_v_u64m4(base, bindex, value, vl); +void test_vsoxei32_v_u64m4(uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u64m4(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsoxei32_v_u64m8(base, bindex, value, vl); +void test_vsoxei32_v_u64m8(uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_u64m8(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsoxei32_v_f16mf4_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_f16mf2_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m1_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsoxei32_v_f16m1_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsoxei32_v_f16m2_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m4_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return 
__riscv_vsoxei32_v_f16m4_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsoxei32_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m1_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m4_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m8_m(vbool4_t mask, float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei32_v_f32m8_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m1_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m1_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m2_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m4_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m4_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m8_m(vbool8_t mask, float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei32_v_f64m8_m(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei32_v_f64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return 
__riscv_vsoxei32_v_i8mf8_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i8mf8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i8mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i8mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i8mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i8mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i8m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i8m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i8m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i8m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i16mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i16mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i16m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i16m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i16m4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i16m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i32mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i32m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i32m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i32m4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i32m8_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i32m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i64m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i64m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i64m4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei32_v_i64m8_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_i64m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u8mf8_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u8mf8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u8mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u8mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u8mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u8mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u8m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u8m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u8m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u8m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u16mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u16mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u16m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u16m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u16m4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u16m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u32mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u32m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u32m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u32m4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u32m8_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u32m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u64m1_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u64m2_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u64m4_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei32_v_u64m8_m(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32_v_u64m8_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxei64.c b/auto-generated/gnu-api-tests/vsoxei64.c
index 8ebff036a..e2a38bc7a 100644
--- a/auto-generated/gnu-api-tests/vsoxei64.c
+++ b/auto-generated/gnu-api-tests/vsoxei64.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxei64_v_f16mf4(float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16mf4(base, bindex, value, vl);
+void test_vsoxei64_v_f16mf4(float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f16mf2(float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16mf2(base, bindex, value, vl);
+void test_vsoxei64_v_f16mf2(float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f16m1(float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16m1(base, bindex, value, vl);
+void test_vsoxei64_v_f16m1(float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f16m2(float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16m2(base, bindex, value, vl);
+void test_vsoxei64_v_f16m2(float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32mf2(float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32mf2(base, bindex, value, vl);
+void test_vsoxei64_v_f32mf2(float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32m1(float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32m1(base, bindex, value, vl);
+void test_vsoxei64_v_f32m1(float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32m2(float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32m2(base, bindex, value, vl);
+void test_vsoxei64_v_f32m2(float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32m4(float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32m4(base, bindex, value, vl);
+void test_vsoxei64_v_f32m4(float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m1(float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m1(base, bindex, value, vl);
+void test_vsoxei64_v_f64m1(float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m2(float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m2(base, bindex, value, vl);
+void test_vsoxei64_v_f64m2(float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m4(float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m4(base, bindex, value, vl);
+void test_vsoxei64_v_f64m4(float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m8(float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m8(base, bindex, value, vl);
+void test_vsoxei64_v_f64m8(float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8mf8(base, bindex, value, vl);
+void test_vsoxei64_v_i8mf8(int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8mf8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8mf4(base, bindex, value, vl);
+void test_vsoxei64_v_i8mf4(int8_t *rs1, vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8mf2(base, bindex, value, vl);
+void test_vsoxei64_v_i8mf2(int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8m1(base, bindex, value, vl);
+void test_vsoxei64_v_i8m1(int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16mf4(base, bindex, value, vl);
+void test_vsoxei64_v_i16mf4(int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16mf2(base, bindex, value, vl);
+void test_vsoxei64_v_i16mf2(int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16m1(base, bindex, value, vl);
+void test_vsoxei64_v_i16m1(int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16m2(base, bindex, value, vl);
+void test_vsoxei64_v_i16m2(int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32mf2(base, bindex, value, vl);
+void test_vsoxei64_v_i32mf2(int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32m1(base, bindex, value, vl);
+void test_vsoxei64_v_i32m1(int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32m2(base, bindex, value, vl);
+void test_vsoxei64_v_i32m2(int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32m4(base, bindex, value, vl);
+void test_vsoxei64_v_i32m4(int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m1(base, bindex, value, vl);
+void test_vsoxei64_v_i64m1(int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m2(base, bindex, value, vl);
+void test_vsoxei64_v_i64m2(int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m4(base, bindex, value, vl);
+void test_vsoxei64_v_i64m4(int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m8(base, bindex, value, vl);
+void test_vsoxei64_v_i64m8(int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8mf8(base, bindex, value, vl);
+void test_vsoxei64_v_u8mf8(uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8mf8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8mf4(base, bindex, value, vl);
+void test_vsoxei64_v_u8mf4(uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8mf2(base, bindex, value, vl);
+void test_vsoxei64_v_u8mf2(uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8m1(base, bindex, value, vl);
+void test_vsoxei64_v_u8m1(uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16mf4(base, bindex, value, vl);
+void test_vsoxei64_v_u16mf4(uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16mf2(base, bindex, value, vl);
+void test_vsoxei64_v_u16mf2(uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16m1(base, bindex, value, vl);
+void test_vsoxei64_v_u16m1(uint16_t *rs1, vuint64m4_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16m2(base, bindex, value, vl);
+void test_vsoxei64_v_u16m2(uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32mf2(base, bindex, value, vl);
+void test_vsoxei64_v_u32mf2(uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32m1(base, bindex, value, vl);
+void test_vsoxei64_v_u32m1(uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32m2(base, bindex, value, vl);
+void test_vsoxei64_v_u32m2(uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32m4(base, bindex, value, vl);
+void test_vsoxei64_v_u32m4(uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m1(base, bindex, value, vl);
+void test_vsoxei64_v_u64m1(uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m2(base, bindex, value, vl);
+void test_vsoxei64_v_u64m2(uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m4(base, bindex, value, vl);
+void test_vsoxei64_v_u64m4(uint64_t *rs1, vuint64m4_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m8(base, bindex, value, vl);
+void test_vsoxei64_v_u64m8(uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f16m1_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f16m2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f16m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32m1_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32m2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f32m4_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f32m4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m1_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m2_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m4_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_f64m8_m(vbool8_t mask, float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_f64m8_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_f64m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8mf8_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8mf8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i8m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i8m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i16m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i32m4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_i64m8_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_i64m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8mf8_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8mf8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u8m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u8m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u16m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u32m4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m1_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m2_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m4_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64_v_u64m8_m(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64_v_u64m8_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei64\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxei8.c b/auto-generated/gnu-api-tests/vsoxei8.c
index 443697d9d..c099df63a 100644
--- a/auto-generated/gnu-api-tests/vsoxei8.c
+++ b/auto-generated/gnu-api-tests/vsoxei8.c
@@ -6,476 +6,476 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxei8_v_f16mf4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16mf4(base, bindex, value, vl);
+void test_vsoxei8_v_f16mf4(float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16mf2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16mf2(base, bindex, value, vl);
+void test_vsoxei8_v_f16mf2(float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m1(float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m1(base, bindex, value, vl);
+void test_vsoxei8_v_f16m1(float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m2(float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m2(base, bindex, value, vl);
+void test_vsoxei8_v_f16m2(float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m4(float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m4(base, bindex, value, vl);
+void test_vsoxei8_v_f16m4(float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m8(float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m8(base, bindex, value, vl);
+void test_vsoxei8_v_f16m8(float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32mf2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32mf2(base, bindex, value, vl);
+void test_vsoxei8_v_f32mf2(float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m1(float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m1(base, bindex, value, vl);
+void test_vsoxei8_v_f32m1(float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m2(float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m2(base, bindex, value, vl);
+void test_vsoxei8_v_f32m2(float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m4(float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m4(base, bindex, value, vl);
+void test_vsoxei8_v_f32m4(float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m8(float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m8(base, bindex, value, vl);
+void test_vsoxei8_v_f32m8(float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m1(float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m1(base, bindex, value, vl);
+void test_vsoxei8_v_f64m1(float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m2(float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m2(base, bindex, value, vl);
+void test_vsoxei8_v_f64m2(float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m4(float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m4(base, bindex, value, vl);
+void test_vsoxei8_v_f64m4(float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m8(float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m8(base, bindex, value, vl);
+void test_vsoxei8_v_f64m8(float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8mf8(base, bindex, value, vl);
+void test_vsoxei8_v_i8mf8(int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8mf8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8mf4(base, bindex, value, vl);
+void test_vsoxei8_v_i8mf4(int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8mf2(base, bindex, value, vl);
+void test_vsoxei8_v_i8mf2(int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m1(base, bindex, value, vl);
+void test_vsoxei8_v_i8m1(int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m2(base, bindex, value, vl);
+void test_vsoxei8_v_i8m2(int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m4(base, bindex, value, vl);
+void test_vsoxei8_v_i8m4(int8_t *rs1, vuint8m4_t rs2, vint8m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m8(base, bindex, value, vl);
+void test_vsoxei8_v_i8m8(int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16mf4(base, bindex, value, vl);
+void test_vsoxei8_v_i16mf4(int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16mf2(base, bindex, value, vl);
+void test_vsoxei8_v_i16mf2(int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m1(base, bindex, value, vl);
+void test_vsoxei8_v_i16m1(int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m2(base, bindex, value, vl);
+void test_vsoxei8_v_i16m2(int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m4(base, bindex, value, vl);
+void test_vsoxei8_v_i16m4(int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m8(base, bindex, value, vl);
+void test_vsoxei8_v_i16m8(int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32mf2(base, bindex, value, vl);
+void test_vsoxei8_v_i32mf2(int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32m1(base, bindex, value, vl);
+void test_vsoxei8_v_i32m1(int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32m2(base, bindex, value, vl);
+void test_vsoxei8_v_i32m2(int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32m4(base, bindex, value, vl);
+void test_vsoxei8_v_i32m4(int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32m8(base, bindex, value, vl);
+void test_vsoxei8_v_i32m8(int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i64m1(base, bindex, value, vl);
+void test_vsoxei8_v_i64m1(int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i64m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i64m2(base, bindex, value, vl);
+void test_vsoxei8_v_i64m2(int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i64m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i64m4(base, bindex, value, vl);
+void test_vsoxei8_v_i64m4(int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i64m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i64m8(base, bindex, value, vl);
+void test_vsoxei8_v_i64m8(int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i64m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u8mf8(base, bindex, value, vl);
+void test_vsoxei8_v_u8mf8(uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u8mf8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u8mf4(base, bindex, value, vl);
+void test_vsoxei8_v_u8mf4(uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u8mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u8mf2(base, bindex, value, vl);
+void test_vsoxei8_v_u8mf2(uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u8mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u8m1(base, bindex, value, vl);
+void test_vsoxei8_v_u8m1(uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u8m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u8m2(base, bindex, value, vl);
+void test_vsoxei8_v_u8m2(uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u8m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u8m4(base, bindex, value, vl);
+void test_vsoxei8_v_u8m4(uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u8m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u8m8(base, bindex, value, vl);
+void test_vsoxei8_v_u8m8(uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u8m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u16mf4(base, bindex, value, vl);
+void test_vsoxei8_v_u16mf4(uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u16mf4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u16mf2(base, bindex, value, vl);
+void test_vsoxei8_v_u16mf2(uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u16mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u16m1(base, bindex, value, vl);
+void test_vsoxei8_v_u16m1(uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u16m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u16m2(base, bindex, value, vl);
+void test_vsoxei8_v_u16m2(uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u16m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u16m4(base, bindex, value, vl);
+void test_vsoxei8_v_u16m4(uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u16m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u16m8(base, bindex, value, vl);
+void test_vsoxei8_v_u16m8(uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u16m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u32mf2(base, bindex, value, vl);
+void test_vsoxei8_v_u32mf2(uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u32m1(base, bindex, value, vl);
+void test_vsoxei8_v_u32m1(uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u32m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u32m2(base, bindex, value, vl);
+void test_vsoxei8_v_u32m2(uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u32m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u32m4(base, bindex, value, vl);
+void test_vsoxei8_v_u32m4(uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u32m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u32m8(base, bindex, value, vl);
+void test_vsoxei8_v_u32m8(uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u32m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u64m1(base, bindex, value, vl);
+void test_vsoxei8_v_u64m1(uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u64m1(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u64m2(base, bindex, value, vl);
+void test_vsoxei8_v_u64m2(uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u64m2(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u64m4(base, bindex, value, vl);
+void test_vsoxei8_v_u64m4(uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u64m4(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_u64m8(base, bindex, value, vl);
+void test_vsoxei8_v_u64m8(uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_u64m8(rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m1_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m1_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m4_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f16m8_m(vbool2_t mask, float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f16m8_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f16m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m1_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m1_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m4_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f32m8_m(vbool4_t mask, float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f32m8_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f32m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m1_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m1_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m4_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_f64m8_m(vbool8_t mask, float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_f64m8_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_f64m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8mf8_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8mf8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m1_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint8m4_t rs2, vint8m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i8m8_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i8m8_m(vbool1_t vm, int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i8m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16mf4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m1_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m4_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i16m8_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i16m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32mf2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32m1_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei8_v_i32m2_m(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8_v_i32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base,
vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei8_v_i32m4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_i32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsoxei8_v_i32m8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsoxei8_v_i32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsoxei8_v_i64m1_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsoxei8_v_i64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsoxei8_v_i64m2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_i64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsoxei8_v_i64m4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_i64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsoxei8_v_i64m8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsoxei8_v_i64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsoxei8_v_u8mf8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsoxei8_v_u8mf4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsoxei8_v_u8mf2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsoxei8_v_u8m1_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsoxei8_v_u8m2_m(mask, base, bindex, value, vl); +void 
test_vsoxei8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsoxei8_v_u8m4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u8m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return __riscv_vsoxei8_v_u8m8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u8m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsoxei8_v_u16mf4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsoxei8_v_u16mf2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsoxei8_v_u16m1_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsoxei8_v_u16m2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsoxei8_v_u16m4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsoxei8_v_u16m8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsoxei8_v_u32mf2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsoxei8_v_u32m1_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) { + return 
__riscv_vsoxei8_v_u32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsoxei8_v_u32m2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsoxei8_v_u32m4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsoxei8_v_u32m8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsoxei8_v_u64m1_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsoxei8_v_u64m2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsoxei8_v_u64m4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsoxei8_v_u64m8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsoxei8_v_u64m8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei8\.[ivxfswum.]+\s+} 118 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg2ei16.c b/auto-generated/gnu-api-tests/vsoxseg2ei16.c index c76b9cdfb..8e3d7d13a 100644 --- a/auto-generated/gnu-api-tests/vsoxseg2ei16.c +++ b/auto-generated/gnu-api-tests/vsoxseg2ei16.c @@ -6,388 +6,388 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg2ei16_v_f16mf4x2(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16mf4x2(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16mf2x2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16mf2x2(float16_t *rs1, vuint16mf2_t vs2, 
vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16m1x2(float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16m1x2(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16m2x2(float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16m2x2(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16m4x2(float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16m4x2(float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32mf2x2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32mf2x2(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32m1x2(float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32m1x2(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32m2x2(float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32m2x2(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32m4x2(float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32m4x2(float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f64m1x2(float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f64m1x2(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f64m2x2(float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f64m2x2(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f64m4x2(float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f64m4x2(float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t 
bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8mf8x2(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8mf4x2(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8mf2x2(int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8m1x2(int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8m2x2(int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8m4x2(int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16mf4x2(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16mf2x2(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16m1x2(int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16m2x2(int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16m4x2(int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m4x2(rs1, vs2, vs3, 
vl); } -void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32mf2x2(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32m1x2(int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32m2x2(int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32m4x2(int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i64m1x2(int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i64m2x2(int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i64m4x2(int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8m1x2(uint8_t *rs1, 
vuint16m2_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8m2x2(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8m4x2(uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16m1x2(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16m2x2(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16m4x2(uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32m1x2(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32m2x2(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, 
size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32m4x2(uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u64m1x2(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u64m2x2(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u64m4x2(uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return 
__riscv_vsoxseg2ei16_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg2ei16_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t 
vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m1x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m4x2_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei16\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg2ei32.c b/auto-generated/gnu-api-tests/vsoxseg2ei32.c index 626a5a3b8..f707cf116 100644 --- a/auto-generated/gnu-api-tests/vsoxseg2ei32.c +++ b/auto-generated/gnu-api-tests/vsoxseg2ei32.c @@ -6,372 +6,372 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg2ei32_v_f16mf4x2(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16mf4x2(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16mf2x2(float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16mf2x2(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16m1x2(float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16m1x2(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16m2x2(float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg2ei32_v_f16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16m2x2(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16m4x2(float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16m4x2(float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32mf2x2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32mf2x2(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32m1x2(float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32m1x2(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32m2x2(float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32m2x2(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32m4x2(float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32m4x2(float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f64m1x2(float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f64m1x2(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f64m2x2(float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f64m2x2(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f64m4x2(float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f64m4x2(float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf8x2(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf4x2(int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) { + return 
__riscv_vsoxseg2ei32_v_i8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf2x2(int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8m1x2(int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8m2x2(int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16mf4x2(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16mf2x2(int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m1x2(int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m2x2(int16_t *rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m4x2(int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32mf2x2(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m1x2(int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m2x2(base, bindex, v_tuple, vl); +void 
test_vsoxseg2ei32_v_i32m2x2(int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m4x2(int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m1x2(int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m2x2(int64_t *rs1, vuint32m1_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m4x2(int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8m1x2(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8m2x2(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t 
v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m1x2(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m2x2(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m4x2(uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m1x2(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m2x2(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m4x2(uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m1x2(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m2x2(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m4x2(uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return 
__riscv_vsoxseg2ei32_v_u64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m4x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg2ei32_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t 
vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m1x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m4x2_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei32\.[ivxfswum.]+\s+} 92 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg2ei64.c b/auto-generated/gnu-api-tests/vsoxseg2ei64.c
index 5f8321128..69e4dba5b 100644
--- a/auto-generated/gnu-api-tests/vsoxseg2ei64.c
+++ b/auto-generated/gnu-api-tests/vsoxseg2ei64.c
@@ -6,332 +6,332 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxseg2ei64_v_f16mf4x2(float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f16mf4x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f16mf4x2(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f16mf4x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f16mf2x2(float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f16mf2x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f16mf2x2(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f16mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f16m1x2(float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f16m1x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f16m1x2(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f16m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f16m2x2(float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f16m2x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f16m2x2(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f16m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f32mf2x2(float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f32mf2x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f32mf2x2(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f32mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f32m1x2(float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f32m1x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f32m1x2(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f32m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f32m2x2(float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f32m2x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f32m2x2(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f32m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f32m4x2(float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f32m4x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f32m4x2(float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f32m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_f64m1x2(float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_f64m1x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_f64m1x2(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_f64m1x2(rs1, vs2, vs3, vl);
 }

-void
test_vsoxseg2ei64_v_f64m2x2(float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m2x2(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m4x2(float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m4x2(float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf8x2(int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf4x2(int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf2x2(int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8m1x2(int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf4x2(int16_t *rs1, vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf2x2(int16_t *rs1, vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16m1x2(int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16m2x2(int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32mf2x2(int32_t *rs1, vuint64m1_t vs2, 
vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m1x2(int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m2x2(int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m4x2(int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m1x2(int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m2x2(int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m4x2(int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8m1x2(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf4x2(base, 
bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m1x2(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m2x2(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m1x2(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m2x2(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m4x2(uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m1x2(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m2x2(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m4x2(uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u64m4x2(rs1, vs2, vs3, vl); } -void 
test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, float64_t *base, 
vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); +void 
test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return 
__riscv_vsoxseg2ei64_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, 
uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u32m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u32m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u64m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u64m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u64m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u64m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u64m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u64m4x2_m(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei64\.[ivxfswum.]+\s+} 82 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg2ei8.c b/auto-generated/gnu-api-tests/vsoxseg2ei8.c
index 4703116ae..d5a24f431 100644
--- a/auto-generated/gnu-api-tests/vsoxseg2ei8.c
+++ b/auto-generated/gnu-api-tests/vsoxseg2ei8.c
@@ -6,388 +6,388 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxseg2ei8_v_f16mf4x2(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16mf4x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei8_v_f16mf4x2(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16mf4x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei8_v_f16mf2x2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16mf2x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei8_v_f16mf2x2(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei8_v_f16m1x2(float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16m1x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei8_v_f16m1x2(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei8_v_f16m2x2(float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16m2x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei8_v_f16m2x2(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg2ei8_v_f16m4x2(float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16m4x2(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei8_v_f16m4x2(float16_t *rs1, vuint8m2_t vs2,
vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32mf2x2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32mf2x2(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m1x2(float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m1x2(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m2x2(float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m2x2(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m4x2(float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m4x2(float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m1x2(float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m1x2(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m2x2(float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m2x2(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m4x2(float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m4x2(float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf8x2(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf4x2(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf2x2(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8m1x2(base, 
bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m1x2(int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m2x2(int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m4x2(int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf4x2(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf2x2(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m1x2(int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m2x2(int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m4x2(int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32mf2x2(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m1x2(int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m2x2(int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg2ei8_v_i32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m4x2(int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m1x2(int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m2x2(int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m4x2(int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i64m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8mf8x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8mf8x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m1x2(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m2x2(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m4x2(uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16mf4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16mf4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, 
vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m1x2(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m2x2(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m4x2(uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32mf2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32mf2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m1x2(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m2x2(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m4x2(uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m4x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m1x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m1x2(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m1x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m2x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m2x2(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m2x2(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m4x2(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m4x2(uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m4x2(rs1, vs2, 
vs3, vl); } -void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint8m2_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, 
vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_f64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i8m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i8m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t 
vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t 
v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_i64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u8m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u8m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, 
vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, 
uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m4x2_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei8\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg3ei16.c b/auto-generated/gnu-api-tests/vsoxseg3ei16.c index 6e3aa670c..ed5a9a5f4 100644 --- a/auto-generated/gnu-api-tests/vsoxseg3ei16.c +++ b/auto-generated/gnu-api-tests/vsoxseg3ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg3ei16_v_f16mf4x3(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16mf4x3(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16mf2x3(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16mf2x3(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m1x3(float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16m1x3(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m2x3(float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16m2x3(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32mf2x3(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32mf2x3(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m1x3(float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m1x3(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m2x3(float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m2x3(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f64m1x3(float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m1x3(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m1x3(rs1, vs2, vs3, vl); } 
-void test_vsoxseg3ei16_v_f64m2x3(float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m2x3(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf8x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf8x3(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf8x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf4x3(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf2x3(int8_t *rs1, vuint16m1_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m1x3(int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m2x3(int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf4x3(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf2x3(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m1x3(int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m2x3(int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32mf2x3(int32_t *rs1, vuint16mf4_t vs2, 
vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m1x3(int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m2x3(int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m1x3(int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m2x3(int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf8x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf8x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m1x3(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m2x3(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg3ei16_v_u16mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m1x3(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m2x3(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m1x3(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m2x3(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m1x3(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m2x3(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m1x3_m(mask, base, bindex, 
v_tuple, vl); +void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint16m1_t 
vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m2x3_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg3ei16_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_u64m2x3_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg3ei32.c b/auto-generated/gnu-api-tests/vsoxseg3ei32.c index c31a58610..ec701c0f1 100644 --- a/auto-generated/gnu-api-tests/vsoxseg3ei32.c +++ b/auto-generated/gnu-api-tests/vsoxseg3ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg3ei32_v_f16mf4x3(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf4x3(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16mf2x3(float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16mf2x3(base, 
bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf2x3(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16m1x3(float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m1x3(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16m2x3(float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m2x3(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32mf2x3(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32mf2x3(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32m1x3(float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m1x3(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32m2x3(float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m2x3(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m1x3(float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m1x3(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m2x3(float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m2x3(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf8x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf8x3(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf8x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf4x3(int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf2x3(int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf2x3(rs1, vs2, vs3, vl); } -void 
test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m1x3(int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m2x3(int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf4x3(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf2x3(int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m1x3(int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m2x3(int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32mf2x3(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m1x3(int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m2x3(int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m1x3(int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m2x3(int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t 
vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf8x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf8x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m1x3(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m2x3(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m1x3(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m2x3(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg3ei32_v_u32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32m1x3(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32m2x3(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u64m1x3(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u64m2x3(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m1x3_m(vm, rs1, vs2, vs3, 
vl); } -void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg3ei32_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_i64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, 
vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32m1x3_m(vm, rs1, vs2, vs3, vl); } 
-void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u32m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u32m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u64m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u64m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u64m2x3_m(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei32\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg3ei64.c b/auto-generated/gnu-api-tests/vsoxseg3ei64.c
index c9691f69e..ba3633672 100644
--- a/auto-generated/gnu-api-tests/vsoxseg3ei64.c
+++ b/auto-generated/gnu-api-tests/vsoxseg3ei64.c
@@ -6,284 +6,284 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg3ei64_v_f16mf4x3(float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16mf4x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_f16mf4x3(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16mf4x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_f16mf2x3(float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_f16mf2x3(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_f16m1x3(float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_f16m1x3(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_f16m2x3(float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_f16m2x3(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_f32mf2x3(float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f32mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_f32mf2x3(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f32mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_f32m1x3(float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f32m1x3(base, bindex, v_tuple, vl);
+void
test_vsoxseg3ei64_v_f32m1x3(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32m2x3(float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32m2x3(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m1x3(float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m1x3(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m2x3(float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m2x3(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8mf8x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf8x3(int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8mf8x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf4x3(int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf2x3(int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8m1x3(int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf4x3(int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf2x3(int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m1x3(int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, 
vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m2x3(int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32mf2x3(int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m1x3(int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m2x3(int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m1x3(int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m2x3(int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf8x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf8x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8m1x3(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return 
__riscv_vsoxseg3ei64_v_u16mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m1x3(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m2x3(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32m1x3(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32m2x3(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u64m1x3(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u64m2x3(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_f64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t 
v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t vm, 
int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_i64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16m2x3_m(vm, rs1, vs2, vs3, 
vl);
 }
-void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_u32m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_u32m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_u32m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_u32m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_u64m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_u64m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_u64m2x3_m(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei64\.[ivxfswum.]+\s+} 70 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg3ei8.c b/auto-generated/gnu-api-tests/vsoxseg3ei8.c
index 4e4192ff5..06e717bfe 100644
--- a/auto-generated/gnu-api-tests/vsoxseg3ei8.c
+++ b/auto-generated/gnu-api-tests/vsoxseg3ei8.c
@@ -6,300 +6,300 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg3ei8_v_f16mf4x3(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16mf4x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f16mf4x3(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16mf4x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f16mf2x3(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f16mf2x3(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f16m1x3(float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f16m1x3(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f16m2x3(float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
-  return
__riscv_vsoxseg3ei8_v_f16m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16m2x3(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_f16m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32mf2x3(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_f32mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32mf2x3(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_f32mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32m1x3(float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_f32m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32m1x3(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_f32m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32m2x3(float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_f32m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32m2x3(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_f32m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f64m1x3(float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_f64m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f64m1x3(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_f64m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f64m2x3(float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_f64m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f64m2x3(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_f64m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8mf8x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf8x3(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8mf8x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8mf4x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf4x3(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8mf4x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8mf2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf2x3(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8mf2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8m1x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8m1x3(int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8m1x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8m2x3(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8m2x3(int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8m2x3(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, 
vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16mf4x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16mf4x3(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16mf4x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16mf2x3(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16m1x3(int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16m2x3(int16_t *rs1, vuint8m1_t vs2, vint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i32mf2x3(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i32m1x3(int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i32m2x3(int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i64m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i64m1x3(int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i64m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i64m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i64m2x3(int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i64m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf8x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf8x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf4x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf4x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8m1x3(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8m2x3(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16mf4x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16mf4x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16m1x3(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16m2x3(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32mf2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32mf2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u32m1x3(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u32m2x3(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u64m1x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u64m1x3(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u64m1x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u64m2x3(base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u64m2x3(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u64m2x3(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f32m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f32m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f32m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f32m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f64m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f64m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f64m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f64m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i64m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i64m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i64m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i64m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32m2x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u64m1x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u64m1x3_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u64m2x3_m(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei8\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg4ei16.c b/auto-generated/gnu-api-tests/vsoxseg4ei16.c
index bcb0cbcf8..05cfc0be6 100644
--- a/auto-generated/gnu-api-tests/vsoxseg4ei16.c
+++ b/auto-generated/gnu-api-tests/vsoxseg4ei16.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg4ei16_v_f16mf4x4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16mf4x4(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f16mf2x4(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16mf2x4(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f16m1x4(float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16m1x4(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f16m2x4(float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16m2x4(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f32mf2x4(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f32mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f32mf2x4(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f32mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f32m1x4(float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f32m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f32m1x4(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f32m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f32m2x4(float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f32m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f32m2x4(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f32m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f64m1x4(float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f64m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f64m1x4(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f64m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f64m2x4(float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f64m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f64m2x4(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f64m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8mf8x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8mf8x4(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8mf8x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8mf4x4(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8mf2x4(int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8m1x4(int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8m2x4(int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16mf4x4(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16mf2x4(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16m1x4(int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16m2x4(int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i32mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i32mf2x4(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i32mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i32m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i32m1x4(int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i32m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i32m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i32m2x4(int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i32m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i64m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i64m1x4(int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i64m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i64m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i64m2x4(int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i64m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8mf8x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8mf8x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8m1x4(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8m2x4(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16m1x4(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16m2x4(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u32mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u32mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u32m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32m1x4(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u32m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u32m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32m2x4(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u32m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u64m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u64m1x4(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u64m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u64m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u64m2x4(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u64m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_f64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i8m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i8m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_i64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u8m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u8m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei16_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei16_v_u64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei16\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg4ei32.c b/auto-generated/gnu-api-tests/vsoxseg4ei32.c
index d1b8320b3..62315bba0 100644
--- a/auto-generated/gnu-api-tests/vsoxseg4ei32.c
+++ b/auto-generated/gnu-api-tests/vsoxseg4ei32.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg4ei32_v_f16mf4x4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16mf4x4(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f16mf2x4(float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16mf2x4(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f16m1x4(float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16m1x4(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f16m2x4(float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16m2x4(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f32mf2x4(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f32mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f32mf2x4(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f32mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f32m1x4(float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f32m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f32m1x4(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f32m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f32m2x4(float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f32m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f32m2x4(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f32m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f64m1x4(float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f64m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f64m1x4(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f64m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f64m2x4(float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f64m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f64m2x4(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f64m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8mf8x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8mf8x4(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8mf8x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8mf4x4(int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8mf2x4(int8_t *rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8m1x4(int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8m2x4(int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16mf4x4(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16mf2x4(int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16m1x4(int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16m2x4(int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i32mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i32mf2x4(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i32mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i32m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i32m1x4(int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i32m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i32m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i32m2x4(int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i32m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i64m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i64m1x4(int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i64m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i64m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i64m2x4(int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i64m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8mf8x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8mf8x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8m1x4(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8m2x4(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16mf4x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16mf4x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16m1x4(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16m2x4(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u32mf2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u32mf2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u32m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32m1x4(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u32m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u32m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32m2x4(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u32m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u64m1x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u64m1x4(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u64m1x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u64m2x4(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u64m2x4(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u64m2x4(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_f64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i8m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i8m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_i64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u8m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u8m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei32_v_u32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei32_v_u32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask,
uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32_v_u32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32_v_u64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32_v_u64m2x4_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg4ei64.c b/auto-generated/gnu-api-tests/vsoxseg4ei64.c index 39fa822d6..90d15cd7d 100644 --- a/auto-generated/gnu-api-tests/vsoxseg4ei64.c +++ b/auto-generated/gnu-api-tests/vsoxseg4ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg4ei64_v_f16mf4x4(float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16mf4x4(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16mf2x4(float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16mf2x4(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16m1x4(float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16m1x4(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16m2x4(float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16m2x4(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32mf2x4(float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32mf2x4(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32m1x4(float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32m1x4(float32_t *rs1, vuint64m2_t vs2, 
vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32m2x4(float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32m2x4(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f64m1x4(float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f64m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f64m1x4(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f64m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f64m2x4(float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f64m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f64m2x4(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f64m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf8x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf8x4(int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf8x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf4x4(int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf2x4(int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8m1x4(int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16mf4x4(int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16mf2x4(int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16m1x4(int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg4ei64_v_i16m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16m2x4(int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32mf2x4(int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32m1x4(int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32m2x4(int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i64m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i64m1x4(int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i64m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i64m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i64m2x4(int64_t *rs1, vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i64m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf8x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf8x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8m1x4(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16mf4x4(rs1, vs2, vs3, vl); } -void 
test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16m1x4(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16m2x4(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32m1x4(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32m2x4(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u64m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u64m1x4(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u64m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u64m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u64m2x4(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u64m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, 
vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_f64m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf2x4_m(mask, base, bindex, 
v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) { + return 
__riscv_vsoxseg4ei64_v_i64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_i64m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t 
*base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64_v_u64m2x4_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg4ei8.c b/auto-generated/gnu-api-tests/vsoxseg4ei8.c index a2ddcdffa..20a03e531 100644 --- a/auto-generated/gnu-api-tests/vsoxseg4ei8.c +++ b/auto-generated/gnu-api-tests/vsoxseg4ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg4ei8_v_f16mf4x4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f16mf4x4(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f16mf2x4(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f16mf2x4(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f16m1x4(float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f16m1x4(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f16m2x4(float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16m2x4(base, bindex, v_tuple, vl); +void 
test_vsoxseg4ei8_v_f16m2x4(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f32mf2x4(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f32mf2x4(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f32m1x4(float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f32m1x4(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f32m2x4(float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f32m2x4(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f64m1x4(float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f64m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f64m1x4(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f64m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f64m2x4(float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f64m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f64m2x4(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f64m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf8x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8mf8x4(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf8x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8mf4x4(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8mf2x4(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8m1x4(int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8m2x4(int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg4ei8_v_i16mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16mf4x4(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16mf2x4(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16m1x4(int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16m2x4(int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i32mf2x4(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i32m1x4(int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i32m2x4(int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i64m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i64m1x4(int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i64m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i64m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i64m2x4(int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i64m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf8x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf8x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t 
bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8m1x4(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8m2x4(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16mf4x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16mf4x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16m1x4(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16m2x4(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32mf2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32mf2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u32m1x4(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32m1x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u32m2x4(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u64m1x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u64m1x4(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u64m1x4(rs1, vs2, 
vs3, vl); } -void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u64m2x4(base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u64m2x4(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u64m2x4(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg4ei8_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_f64m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) { + return 
__riscv_vsoxseg4ei8_v_i16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_i64m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg4ei8_v_u8m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei8_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t 
vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei8_v_u64m2x4_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg5ei16.c b/auto-generated/gnu-api-tests/vsoxseg5ei16.c index 28b4b5a47..253fb06ef 100644 --- a/auto-generated/gnu-api-tests/vsoxseg5ei16.c +++ b/auto-generated/gnu-api-tests/vsoxseg5ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg5ei16_v_f16mf4x5(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f16mf4x5(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f16mf2x5(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f16mf2x5(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f16m1x5(float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f16m1x5(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f32mf2x5(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f32mf2x5(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f32m1x5(float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f32m1x5(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f64m1x5(float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f64m1x5(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf8x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8mf8x5(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf8x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8mf4x5(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8mf2x5(int8_t *rs1, vuint16m1_t vs2, 
vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8m1x5(int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i16mf4x5(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i16mf2x5(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i16m1x5(int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i32mf2x5(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i32m1x5(int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i64m1x5(int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf8x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf8x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg5ei16_v_u8m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8m1x5(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u16m1x5(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u32m1x5(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u64m1x5(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - 
return __riscv_vsoxseg5ei16_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_f64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t 
vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_i64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf2x5_m(vm, 
rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei16_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_u64m1x5_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg5ei32.c b/auto-generated/gnu-api-tests/vsoxseg5ei32.c index 218ef2b1b..0baeb275d 100644 --- a/auto-generated/gnu-api-tests/vsoxseg5ei32.c +++ b/auto-generated/gnu-api-tests/vsoxseg5ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg5ei32_v_f16mf4x5(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f16mf4x5(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f16mf2x5(float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f16mf2x5(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f16m1x5(float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f16m1x5(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f32mf2x5(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f32mf2x5(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f32m1x5(float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg5ei32_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f32m1x5(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f64m1x5(float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f64m1x5(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf8x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8mf8x5(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf8x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8mf4x5(int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8mf2x5(int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8m1x5(int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i16mf4x5(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i16mf2x5(int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i16m1x5(int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i32mf2x5(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i32m1x5(int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32m1x5(rs1, vs2, vs3, vl); } -void 
test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i64m1x5(int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf8x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf8x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8m1x5(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u16m1x5(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u32m1x5(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u64m1x5(base, bindex, v_tuple, vl); +void 
test_vsoxseg5ei32_v_u64m1x5(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_f64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) 
{ + return __riscv_vsoxseg5ei32_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_i64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t 
*base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei32_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei32_v_u64m1x5_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg5ei64.c b/auto-generated/gnu-api-tests/vsoxseg5ei64.c index c22958b5c..75c59cc5d 100644 --- a/auto-generated/gnu-api-tests/vsoxseg5ei64.c +++ b/auto-generated/gnu-api-tests/vsoxseg5ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg5ei64_v_f16mf4x5(float16_t *base, 
vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf4x5(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16mf2x5(float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf2x5(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16m1x5(float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16m1x5(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32mf2x5(float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32mf2x5(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32m1x5(float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32m1x5(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f64m1x5(float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f64m1x5(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8mf8x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf8x5(int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8mf8x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf4x5(int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf2x5(int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8m1x5(int8_t *rs1, vuint64m8_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf4x5(int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t 
vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf2x5(int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16m1x5(int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32mf2x5(int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32m1x5(int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i64m1x5(int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf8x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf8x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8m1x5(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg5ei64_v_u16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16m1x5(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32m1x5(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u64m1x5(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f32m1x5_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_f64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg5ei64_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_i64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t vm, 
uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64_v_u64m1x5_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg5ei8.c b/auto-generated/gnu-api-tests/vsoxseg5ei8.c index ee8f1a42c..a43d662b6 100644 --- a/auto-generated/gnu-api-tests/vsoxseg5ei8.c +++ b/auto-generated/gnu-api-tests/vsoxseg5ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg5ei8_v_f16mf4x5(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16mf4x5(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f16mf2x5(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16mf2x5(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f16m1x5(float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16m1x5(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f32mf2x5(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f32mf2x5(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f32m1x5(float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f32m1x5(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f64m1x5(float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f64m1x5(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return 
-  return __riscv_vsoxseg5ei8_v_i8mf8x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8mf8x5(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8mf8x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i8mf4x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8mf4x5(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8mf4x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i8mf2x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8mf2x5(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8mf2x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i8m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8m1x5(int8_t *rs1, vuint8m1_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i16mf4x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i16mf4x5(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i16mf4x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i16mf2x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i16mf2x5(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i16mf2x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i16m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i16m1x5(int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i16m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i32mf2x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i32mf2x5(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i32mf2x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i32m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i32m1x5(int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i32m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i64m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i64m1x5(int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i64m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8mf8x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8mf8x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8mf4x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8mf4x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8mf2x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8mf2x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8m1x5(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u16mf4x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u16mf4x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u16mf2x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u16mf2x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u16m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u16m1x5(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u16m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u32mf2x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u32mf2x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u32m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u32m1x5(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u32m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u64m1x5(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u64m1x5(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u64m1x5(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_f16m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_f16m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_f32m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_f32m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_f64m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_f64m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i8m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i8m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i16m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i16m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i32m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i32m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_i64m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_i64m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u8m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u8m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u16mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u16m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u16m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u32m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u32m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei8_v_u64m1x5_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei8_v_u64m1x5_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg6ei16.c b/auto-generated/gnu-api-tests/vsoxseg6ei16.c
index 0a297b0f5..ccc9facf8 100644
--- a/auto-generated/gnu-api-tests/vsoxseg6ei16.c
+++ b/auto-generated/gnu-api-tests/vsoxseg6ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg6ei16_v_f16mf4x6(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f16mf4x6(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f16mf2x6(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f16mf2x6(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f16m1x6(float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f16m1x6(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f32mf2x6(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f32mf2x6(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f32m1x6(float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f32m1x6(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f64m1x6(float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f64m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f64m1x6(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f64m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf8x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8mf8x6(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf8x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8mf4x6(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8mf2x6(int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8m1x6(int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i16mf4x6(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i16mf2x6(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i16m1x6(int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i32mf2x6(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i32m1x6(int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i64m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i64m1x6(int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i64m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf8x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf8x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8m1x6(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u16m1x6(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u32m1x6(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u64m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u64m1x6(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u64m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg6ei32.c b/auto-generated/gnu-api-tests/vsoxseg6ei32.c
index 471283ed9..61c3ee0ce 100644
--- a/auto-generated/gnu-api-tests/vsoxseg6ei32.c
+++ b/auto-generated/gnu-api-tests/vsoxseg6ei32.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg6ei32_v_f16mf4x6(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f16mf4x6(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f16mf2x6(float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f16mf2x6(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f16m1x6(float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f16m1x6(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f32mf2x6(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f32mf2x6(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f32m1x6(float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f32m1x6(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f64m1x6(float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f64m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f64m1x6(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f64m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8mf8x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8mf8x6(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8mf8x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8mf4x6(int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8mf2x6(int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8m1x6(int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i16mf4x6(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i16mf2x6(int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i16m1x6(int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i32mf2x6(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i32m1x6(int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i64m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i64m1x6(int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i64m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8mf8x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8mf8x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8m1x6(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u16m1x6(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u32m1x6(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u64m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u64m1x6(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u64m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_f64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_i64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_i64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple,
-void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei32_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei32_v_u64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg6ei64.c b/auto-generated/gnu-api-tests/vsoxseg6ei64.c
index f419fd158..b2b146f88 100644
--- a/auto-generated/gnu-api-tests/vsoxseg6ei64.c
+++ b/auto-generated/gnu-api-tests/vsoxseg6ei64.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg6ei64_v_f16mf4x6(float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_f16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_f16mf4x6(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_f16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_f16mf2x6(float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_f16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_f16mf2x6(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_f16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_f16m1x6(float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_f16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_f16m1x6(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_f16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_f32mf2x6(float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_f32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_f32mf2x6(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_f32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_f32m1x6(float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_f32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_f32m1x6(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_f32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_f64m1x6(float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_f64m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_f64m1x6(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_f64m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i8mf8x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i8mf8x6(int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i8mf8x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i8mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i8mf4x6(int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i8mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i8mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i8mf2x6(int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i8mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i8m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i8m1x6(int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i8m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i16mf4x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i16mf4x6(int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i16mf4x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i16mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i16mf2x6(int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i16mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i16m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i16m1x6(int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i16m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i32mf2x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i32mf2x6(int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i32mf2x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei64_v_i32m1x6(base, bindex, v_tuple, vl);
+void test_vsoxseg6ei64_v_i32m1x6(int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei64_v_i32m1x6(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) {
vl); +void test_vsoxseg6ei64_v_i64m1x6(int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i64m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf8x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf8x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf4x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf4x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8m1x6(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf4x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf4x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16m1x6(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32m1x6(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u64m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u64m1x6(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u64m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t 
mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_f16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_f32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_f64m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg6ei64_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_i64m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, 
vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64_v_u64m1x6_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg6ei8.c b/auto-generated/gnu-api-tests/vsoxseg6ei8.c index c240054b1..283890e33 100644 --- a/auto-generated/gnu-api-tests/vsoxseg6ei8.c +++ b/auto-generated/gnu-api-tests/vsoxseg6ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg6ei8_v_f16mf4x6(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16mf4x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf4x6(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, 
size_t vl) { + return __riscv_vsoxseg6ei8_v_f16mf4x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16mf2x6(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf2x6(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16m1x6(float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16m1x6(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32mf2x6(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f32mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32mf2x6(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f32mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32m1x6(float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f32m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32m1x6(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f32m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f64m1x6(float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f64m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f64m1x6(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f64m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf8x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf8x6(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf8x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf4x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf4x6(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf4x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf2x6(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8m1x6(int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16mf4x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16mf4x6(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16mf4x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16mf2x6(base, bindex, v_tuple, vl); 
+void test_vsoxseg6ei8_v_i16mf2x6(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16m1x6(int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i32mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32mf2x6(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i32mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i32m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32m1x6(int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i32m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i64m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i64m1x6(int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i64m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf8x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf8x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf4x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf4x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8m1x6(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u16mf4x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u16mf4x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u16mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u16mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - 
return __riscv_vsoxseg6ei8_v_u16m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16m1x6(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u16m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u32mf2x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u32mf2x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u32m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u32m1x6(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u32m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u64m1x6(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u64m1x6(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u64m1x6(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_f64m1x6_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_i64m1x6_m(mask, base, bindex, v_tuple, 
vl); +void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_i64m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) { + return 
__riscv_vsoxseg6ei8_v_u32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8_v_u64m1x6_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg7ei16.c b/auto-generated/gnu-api-tests/vsoxseg7ei16.c index d5fbba87e..259a5c30b 100644 --- a/auto-generated/gnu-api-tests/vsoxseg7ei16.c +++ b/auto-generated/gnu-api-tests/vsoxseg7ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg7ei16_v_f16mf4x7(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f16mf4x7(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f16mf2x7(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f16mf2x7(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f16m1x7(float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f16m1x7(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f32mf2x7(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f32mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f32mf2x7(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f32mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f32m1x7(float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f32m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f32m1x7(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f32m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f64m1x7(float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f64m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f64m1x7(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f64m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf8x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8mf8x7(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf8x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8mf4x7(int8_t *rs1, vuint16mf2_t vs2, 
vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8mf2x7(int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8m1x7(int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i16mf4x7(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i16mf2x7(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i16m1x7(int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i32mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i32mf2x7(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i32mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i32m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i32m1x7(int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i32m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i64m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i64m1x7(int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i64m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf8x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf8x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg7ei16_v_u8mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8m1x7(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u16m1x7(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u32m1x7(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u64m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u64m1x7(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u64m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16m1x7_m(mask, base, bindex, 
v_tuple, vl); +void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_f64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t 
vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_i64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei16_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_u64m1x7_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg7ei32.c b/auto-generated/gnu-api-tests/vsoxseg7ei32.c index c64d4e768..1c0e66574 100644 --- a/auto-generated/gnu-api-tests/vsoxseg7ei32.c +++ b/auto-generated/gnu-api-tests/vsoxseg7ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg7ei32_v_f16mf4x7(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_f16mf4x7(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_f16mf2x7(float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_f16mf2x7(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_f16m1x7(float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_f16m1x7(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_f32mf2x7(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) 
{ - return __riscv_vsoxseg7ei32_v_f32mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_f32mf2x7(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_f32mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_f32m1x7(float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_f32m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_f32m1x7(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_f32m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_f64m1x7(float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_f64m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_f64m1x7(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_f64m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf8x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i8mf8x7(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf8x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i8mf4x7(int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i8mf2x7(int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i8m1x7(int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i16mf4x7(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i16mf2x7(int16_t *rs1, vuint32m1_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i16m1x7(int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i32mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i32mf2x7(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i32mf2x7(rs1, vs2, 
vs3, vl); } -void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i32m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i32m1x7(int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i32m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_i64m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_i64m1x7(int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_i64m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf8x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf8x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u8m1x7(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16mf4x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16mf4x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16m1x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u16m1x7(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16m1x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u32mf2x7(base, bindex, v_tuple, vl); +void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei32_v_u32mf2x7(rs1, vs2, vs3, vl); } -void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsoxseg7ei32_v_u32m1x7(base, bindex, v_tuple, vl); +void 
+void test_vsoxseg7ei32_v_u32m1x7(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u32m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u64m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u64m1x7(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u64m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_f16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_f16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_f32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_f32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_f64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_f64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i8m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_i64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_i64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u8m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei32_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_u64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg7ei64.c b/auto-generated/gnu-api-tests/vsoxseg7ei64.c
index 84c653e9d..695ec1621 100644
--- a/auto-generated/gnu-api-tests/vsoxseg7ei64.c
+++ b/auto-generated/gnu-api-tests/vsoxseg7ei64.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg7ei64_v_f16mf4x7(float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f16mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf4x7(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f16mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f16mf2x7(float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f16mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf2x7(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f16mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f16m1x7(float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f16m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16m1x7(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f16m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f32mf2x7(float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f32mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32mf2x7(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f32mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f32m1x7(float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f32m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32m1x7(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f32m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f64m1x7(float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f64m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f64m1x7(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f64m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8mf8x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf8x7(int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8mf8x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf4x7(int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf2x7(int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8m1x7(int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i16mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf4x7(int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i16mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i16mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf2x7(int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i16mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i16m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16m1x7(int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i16m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i32mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32mf2x7(int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i32mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i32m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32m1x7(int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i32m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i64m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i64m1x7(int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i64m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8mf8x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8mf8x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8m1x7(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u16mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u16mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u16mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u16mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u16m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16m1x7(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u16m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u32mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u32mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u32m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32m1x7(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u32m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u64m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u64m1x7(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u64m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_f64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_f64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i8m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_i64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_i64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u8m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg7ei8.c b/auto-generated/gnu-api-tests/vsoxseg7ei8.c
index e0da8b559..764c5f225 100644
--- a/auto-generated/gnu-api-tests/vsoxseg7ei8.c
+++ b/auto-generated/gnu-api-tests/vsoxseg7ei8.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg7ei8_v_f16mf4x7(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf4x7(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f16mf2x7(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf2x7(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f16m1x7(float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16m1x7(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f32mf2x7(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f32mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32mf2x7(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f32mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f32m1x7(float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f32m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32m1x7(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f32m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f64m1x7(float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f64m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f64m1x7(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f64m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf8x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf8x7(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf8x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf4x7(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf2x7(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8m1x7(int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i16mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf4x7(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i16mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i16mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf2x7(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i16mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i16m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16m1x7(int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i16m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i32mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32mf2x7(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i32mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i32m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32m1x7(int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i32m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i64m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i64m1x7(int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i64m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf8x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf8x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8m1x7(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16mf4x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16mf4x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16m1x7(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u32mf2x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u32mf2x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u32m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32m1x7(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u32m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u64m1x7(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u64m1x7(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u64m1x7(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u32m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u64m1x7_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsoxseg8ei16.c b/auto-generated/gnu-api-tests/vsoxseg8ei16.c
index 2bb591492..30acf553e 100644
--- a/auto-generated/gnu-api-tests/vsoxseg8ei16.c
+++ b/auto-generated/gnu-api-tests/vsoxseg8ei16.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg8ei16_v_f16mf4x8(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f16mf4x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16mf4x8(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f16mf4x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f16mf2x8(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f16mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16mf2x8(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f16mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f16m1x8(float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f16m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16m1x8(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f16m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f32mf2x8(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f32mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f32mf2x8(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f32mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f32m1x8(float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f32m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f32m1x8(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f32m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f64m1x8(float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f64m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f64m1x8(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f64m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i8mf8x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i8mf8x8(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i8mf8x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i8mf4x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i8mf4x8(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i8mf4x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i8mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i8mf2x8(int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i8mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i8m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i8m1x8(int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i8m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i16mf4x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i16mf4x8(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i16mf4x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i16mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i16mf2x8(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i16mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i16m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i16m1x8(int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i16m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i32mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i32mf2x8(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i32mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i32m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i32m1x8(int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i32m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i64m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i64m1x8(int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_i64m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u8mf8x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u8mf8x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u8mf4x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u8mf4x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u8mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u8mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u8m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u8m1x8(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u8m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u16mf4x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u16mf4x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u16mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u16mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u16m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u16m1x8(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u16m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u32mf2x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u32mf2x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u32m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u32m1x8(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u32m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u64m1x8(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_u64m1x8(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u64m1x8(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f16m1x8_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f32m1x8_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_f64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_f64m1x8_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
- return __riscv_vsoxseg8ei16_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_i32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) { + return 
__riscv_vsoxseg8ei16_v_i64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u32m1x8_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_u64m1x8_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg8ei32.c b/auto-generated/gnu-api-tests/vsoxseg8ei32.c index 36af99c8a..847236372 100644 --- a/auto-generated/gnu-api-tests/vsoxseg8ei32.c +++ b/auto-generated/gnu-api-tests/vsoxseg8ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg8ei32_v_f16mf4x8(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf4x8(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16mf2x8(float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf2x8(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16m1x8(float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16m1x8(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32mf2x8(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32mf2x8(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32m1x8(float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32m1x8(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f64m1x8(float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f64m1x8(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf8x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf8x8(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf8x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf4x8(int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) { + return 
__riscv_vsoxseg8ei32_v_i8mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf2x8(int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8m1x8(int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf4x8(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf2x8(int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16m1x8(int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32mf2x8(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32m1x8(int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i64m1x8(int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf8x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf8x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf2x8(base, bindex, v_tuple, vl); +void 
test_vsoxseg8ei32_v_u8mf2x8(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8m1x8(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16m1x8(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32m1x8(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u64m1x8(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t vm, float16_t 
*rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_f64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16mf2x8_m(vm, rs1, vs2, vs3, 
vl); } -void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_i64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t 
vl) { - return __riscv_vsoxseg8ei32_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32_v_u64m1x8_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg8ei64.c b/auto-generated/gnu-api-tests/vsoxseg8ei64.c index 341b298b1..b749804ec 100644 --- a/auto-generated/gnu-api-tests/vsoxseg8ei64.c +++ b/auto-generated/gnu-api-tests/vsoxseg8ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg8ei64_v_f16mf4x8(float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf4x8(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16mf2x8(float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf2x8(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16m1x8(float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16m1x8(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32mf2x8(float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32mf2x8(float32_t 
*rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32m1x8(float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32m1x8(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f64m1x8(float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f64m1x8(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf8x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf8x8(int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf8x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf4x8(int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf2x8(int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8m1x8(int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf4x8(int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf2x8(int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16m1x8(int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32mf2x8(int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg8ei64_v_i32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32m1x8(int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i64m1x8(int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf8x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf8x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8m1x8(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16m1x8(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u32m1x8(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u32m1x8(rs1, vs2, vs3, vl); 
} -void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u64m1x8(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_f64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg8ei64_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_i64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t 
vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64_v_u64m1x8_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsoxseg8ei8.c b/auto-generated/gnu-api-tests/vsoxseg8ei8.c index 1d5abe4c1..d792657d2 100644 --- a/auto-generated/gnu-api-tests/vsoxseg8ei8.c +++ 
b/auto-generated/gnu-api-tests/vsoxseg8ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg8ei8_v_f16mf4x8(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf4x8(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16mf2x8(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf2x8(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16m1x8(float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16m1x8(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32mf2x8(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32mf2x8(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32m1x8(float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32m1x8(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f64m1x8(float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f64m1x8(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf8x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf8x8(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf8x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf4x8(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf2x8(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8m1x8(int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t 
vl) { - return __riscv_vsoxseg8ei8_v_i16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16mf4x8(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16mf2x8(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16m1x8(int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32mf2x8(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32m1x8(int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i64m1x8(int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf8x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf8x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf4x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8m1x8(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16mf4x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16mf4x8(rs1, vs2, vs3, vl); } -void 
test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16m1x8(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u32mf2x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u32mf2x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u32m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32m1x8(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u32m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u64m1x8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u64m1x8(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u64m1x8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return 
__riscv_vsoxseg8ei8_v_f32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_f64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { - 
return __riscv_vsoxseg8ei8_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_i64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, 
vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8_v_u64m1x8_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsra.c b/auto-generated/gnu-api-tests/vsra.c index e7ef499ac..4b98a34a1 100644 --- a/auto-generated/gnu-api-tests/vsra.c +++ b/auto-generated/gnu-api-tests/vsra.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf8(op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf8(op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf4(op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf4(op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf2(op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf2(op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_vv_i8m1(op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m1(op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_vv_i8m2(op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m2(vs2, vs1, vl); } -vint8m2_t 
test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m2(op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_vv_i8m4(op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m4(op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_vv_i8m8(op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m8(op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf4(op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf4(op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf2(op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf2(op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_vv_i16m1(op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m1(op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_vv_i16m2(op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m2(op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_vv_i16m4(op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m4(op1, 
shift, vl); +vint16m4_t test_vsra_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_vv_i16m8(op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m8(op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i32mf2(op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32mf2(op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_vv_i32m1(op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m1(op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_vv_i32m2(op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m2(op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_vv_i32m4(op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m4(op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_vv_i32m8(op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m8(op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_vv_i64m1(op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m1(op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t 
vl) { + return __riscv_vsra_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_vv_i64m2(op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m2(op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra_vv_i64m4(op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m4(op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra_vv_i64m8(op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m8(op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m8(vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf8_m(mask, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf8_m(mask, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf4_m(mask, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf4_m(mask, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf2_m(mask, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf2_m(mask, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_vv_i8m1_m(mask, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return 
__riscv_vsra_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m1_m(mask, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_vv_i8m2_m(mask, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m2_m(mask, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m2_m(vm, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_vv_i8m4_m(mask, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m4_m(vm, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m4_m(mask, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m4_m(vm, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_vv_i8m8_m(mask, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m8_m(vm, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m8_m(mask, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m8_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf4_m(mask, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf4_m(mask, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf2_m(mask, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf2_m(mask, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_vv_i16m1_m(mask, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m1_m(vm, vs2, vs1, vl); } 
-vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m1_m(mask, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_vv_i16m2_m(mask, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m2_m(mask, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_vv_i16m4_m(mask, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m4_m(mask, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_vv_i16m8_m(mask, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m8_m(mask, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i32mf2_m(mask, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32mf2_m(mask, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_vv_i32m1_m(mask, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m1_m(mask, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_vv_i32m2_m(mask, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m2_m(vm, vs2, vs1, vl); } 
-vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m2_m(mask, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_vv_i32m4_m(mask, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m4_m(mask, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_vv_i32m8_m(mask, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m8_m(mask, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_vv_i64m1_m(mask, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m1_m(mask, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_vv_i64m2_m(mask, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m2_m(mask, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra_vv_i64m4_m(mask, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m4_m(mask, op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra_vv_i64m8_m(mask, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t 
test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m8_m(mask, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsra\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vsrl.c b/auto-generated/gnu-api-tests/vsrl.c index aa8e120f1..259bdfd03 100644 --- a/auto-generated/gnu-api-tests/vsrl.c +++ b/auto-generated/gnu-api-tests/vsrl.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf8(op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf8(vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf8(op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf8(vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf4(op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf4(vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf4(op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf4(vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf2(op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf2(vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf2(op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf2(vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m1(op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m1(vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m1(op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m1(vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m2(op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m2(vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m2(op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m2(vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m4(op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m4(vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return 
__riscv_vsrl_vx_u8m4(op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m4(vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m8(op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m8(vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m8(op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m8(vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf4(op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf4(op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf2(op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf2(op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m1(op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m1(op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m2(op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m2(op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m4(op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m4(op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m8(op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m8(op1, 
shift, vl); +vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32mf2(op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32mf2(op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m1(op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m1(op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m2(op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m2(op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m4(op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m4(op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m8(op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m8(op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m1(op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m1(op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m2(op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m2(op1, shift, vl); 
+vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m4(op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m4(op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m8(op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m8(op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m8(vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf8_m(mask, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf8_m(vm, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf8_m(mask, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf8_m(vm, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf4_m(mask, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf4_m(vm, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf4_m(mask, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf2_m(mask, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf2_m(mask, op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m1_m(mask, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m1_m(mask, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t 
op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m2_m(mask, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m2_m(mask, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m4_m(mask, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m4_m(mask, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m8_m(mask, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m8_m(mask, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf4_m(mask, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf4_m(mask, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf2_m(mask, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf2_m(mask, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m1_m(mask, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m1_m(mask, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, 
vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m2_m(mask, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m2_m(mask, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m4_m(mask, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m4_m(mask, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m8_m(mask, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m8_m(mask, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32mf2_m(mask, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32mf2_m(mask, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m1_m(mask, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m1_m(mask, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m2_m(mask, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m2_m(mask, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m2_m(vm, vs2, rs1, vl); } 
-vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m4_m(mask, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m4_m(mask, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m8_m(mask, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m8_m(mask, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m1_m(mask, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m1_m(mask, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m2_m(mask, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m2_m(mask, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m4_m(mask, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m4_m(mask, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m8_m(mask, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m8_m(mask, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vsrl_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsrl\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-api-tests/vsse16.c b/auto-generated/gnu-api-tests/vsse16.c index 8333977c1..d4ad93891 100644 --- a/auto-generated/gnu-api-tests/vsse16.c +++ b/auto-generated/gnu-api-tests/vsse16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsse16_v_f16mf4(float16_t *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { - return __riscv_vsse16_v_f16mf4(base, bstride, value, vl); +void test_vsse16_v_f16mf4(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsse16_v_f16mf4(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16mf2(float16_t *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { - return __riscv_vsse16_v_f16mf2(base, bstride, value, vl); +void test_vsse16_v_f16mf2(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsse16_v_f16mf2(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m1(float16_t *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { - return __riscv_vsse16_v_f16m1(base, bstride, value, vl); +void test_vsse16_v_f16m1(float16_t *rs1, ptrdiff_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsse16_v_f16m1(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m2(float16_t *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { - return __riscv_vsse16_v_f16m2(base, bstride, value, vl); +void test_vsse16_v_f16m2(float16_t *rs1, ptrdiff_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsse16_v_f16m2(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m4(float16_t *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { - return __riscv_vsse16_v_f16m4(base, bstride, value, vl); +void test_vsse16_v_f16m4(float16_t *rs1, ptrdiff_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsse16_v_f16m4(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m8(float16_t *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { - return __riscv_vsse16_v_f16m8(base, bstride, value, vl); +void test_vsse16_v_f16m8(float16_t *rs1, ptrdiff_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsse16_v_f16m8(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { - return __riscv_vsse16_v_i16mf4(base, bstride, value, vl); +void test_vsse16_v_i16mf4(int16_t *rs1, ptrdiff_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsse16_v_i16mf4(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { - return __riscv_vsse16_v_i16mf2(base, bstride, value, vl); +void test_vsse16_v_i16mf2(int16_t *rs1, ptrdiff_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsse16_v_i16mf2(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { - return __riscv_vsse16_v_i16m1(base, bstride, value, vl); +void test_vsse16_v_i16m1(int16_t *rs1, ptrdiff_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsse16_v_i16m1(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { - return __riscv_vsse16_v_i16m2(base, bstride, value, vl); +void test_vsse16_v_i16m2(int16_t *rs1, ptrdiff_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsse16_v_i16m2(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { - 
return __riscv_vsse16_v_i16m4(base, bstride, value, vl);
+void test_vsse16_v_i16m4(int16_t *rs1, ptrdiff_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16m4(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) {
-  return __riscv_vsse16_v_i16m8(base, bstride, value, vl);
+void test_vsse16_v_i16m8(int16_t *rs1, ptrdiff_t rs2, vint16m8_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16m8(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsse16_v_u16mf4(base, bstride, value, vl);
+void test_vsse16_v_u16mf4(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16mf4(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsse16_v_u16mf2(base, bstride, value, vl);
+void test_vsse16_v_u16mf2(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16mf2(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m1(base, bstride, value, vl);
+void test_vsse16_v_u16m1(uint16_t *rs1, ptrdiff_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m1(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m2(base, bstride, value, vl);
+void test_vsse16_v_u16m2(uint16_t *rs1, ptrdiff_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m2(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m4(base, bstride, value, vl);
+void test_vsse16_v_u16m4(uint16_t *rs1, ptrdiff_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m4(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m8(base, bstride, value, vl);
+void test_vsse16_v_u16m8(uint16_t *rs1, ptrdiff_t rs2, vuint16m8_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m8(rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_f16mf4_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsse16_v_f16mf4_m(mask, base, bstride, value, vl);
+void test_vsse16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_f16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_f16mf2_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsse16_v_f16mf2_m(mask, base, bstride, value, vl);
+void test_vsse16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_f16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_f16m1_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsse16_v_f16m1_m(mask, base, bstride, value, vl);
+void test_vsse16_v_f16m1_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsse16_v_f16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_f16m2_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsse16_v_f16m2_m(mask, base, bstride, value, vl);
+void test_vsse16_v_f16m2_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_f16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_f16m4_m(vbool4_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsse16_v_f16m4_m(mask, base, bstride, value, vl);
+void test_vsse16_v_f16m4_m(vbool4_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_f16m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_f16m8_m(vbool2_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsse16_v_f16m8_m(mask, base, bstride, value, vl);
+void test_vsse16_v_f16m8_m(vbool2_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsse16_v_f16m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) {
-  return __riscv_vsse16_v_i16mf4_m(mask, base, bstride, value, vl);
+void test_vsse16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) {
-  return __riscv_vsse16_v_i16mf2_m(mask, base, bstride, value, vl);
+void test_vsse16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) {
-  return __riscv_vsse16_v_i16m1_m(mask, base, bstride, value, vl);
+void test_vsse16_v_i16m1_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) {
-  return __riscv_vsse16_v_i16m2_m(mask, base, bstride, value, vl);
+void test_vsse16_v_i16m2_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) {
-  return __riscv_vsse16_v_i16m4_m(mask, base, bstride, value, vl);
+void test_vsse16_v_i16m4_m(vbool4_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) {
-  return __riscv_vsse16_v_i16m8_m(mask, base, bstride, value, vl);
+void test_vsse16_v_i16m8_m(vbool2_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m8_t vs3, size_t vl) {
+  return __riscv_vsse16_v_i16m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsse16_v_u16mf4_m(mask, base, bstride, value, vl);
+void test_vsse16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsse16_v_u16mf2_m(mask, base, bstride, value, vl);
+void test_vsse16_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m1_m(mask, base, bstride, value, vl);
+void test_vsse16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m2_m(mask, base, bstride, value, vl);
+void test_vsse16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m4_m(mask, base, bstride, value, vl);
+void test_vsse16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) {
-  return __riscv_vsse16_v_u16m8_m(mask, base, bstride, value, vl);
+void test_vsse16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m8_t vs3, size_t vl) {
+  return __riscv_vsse16_v_u16m8_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse16\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/gnu-api-tests/vsse32.c b/auto-generated/gnu-api-tests/vsse32.c
index 16624796b..d44390bcb 100644
--- a/auto-generated/gnu-api-tests/vsse32.c
+++ b/auto-generated/gnu-api-tests/vsse32.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsse32_v_f32mf2(float32_t *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsse32_v_f32mf2(base, bstride, value, vl);
+void test_vsse32_v_f32mf2(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m1(float32_t *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m1(base, bstride, value, vl);
+void test_vsse32_v_f32m1(float32_t *rs1, ptrdiff_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m1(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m2(float32_t *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m2(base, bstride, value, vl);
+void test_vsse32_v_f32m2(float32_t *rs1, ptrdiff_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m2(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m4(float32_t *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m4(base, bstride, value, vl);
+void test_vsse32_v_f32m4(float32_t *rs1, ptrdiff_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m4(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m8(float32_t *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m8(base, bstride, value, vl);
+void test_vsse32_v_f32m8(float32_t *rs1, ptrdiff_t rs2, vfloat32m8_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m8(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) {
-  return __riscv_vsse32_v_i32mf2(base, bstride, value, vl);
+void test_vsse32_v_i32mf2(int32_t *rs1, ptrdiff_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m1(base, bstride, value, vl);
+void test_vsse32_v_i32m1(int32_t *rs1, ptrdiff_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m1(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m2(base, bstride, value, vl);
+void test_vsse32_v_i32m2(int32_t *rs1, ptrdiff_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m2(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m4(base, bstride, value, vl);
+void test_vsse32_v_i32m4(int32_t *rs1, ptrdiff_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m4(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m8(base, bstride, value, vl);
+void test_vsse32_v_i32m8(int32_t *rs1, ptrdiff_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m8(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsse32_v_u32mf2(base, bstride, value, vl);
+void test_vsse32_v_u32mf2(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32mf2(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m1(base, bstride, value, vl);
+void test_vsse32_v_u32m1(uint32_t *rs1, ptrdiff_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m1(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m2(base, bstride, value, vl);
+void test_vsse32_v_u32m2(uint32_t *rs1, ptrdiff_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m2(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m4(base, bstride, value, vl);
+void test_vsse32_v_u32m4(uint32_t *rs1, ptrdiff_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m4(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m8(base, bstride, value, vl);
+void test_vsse32_v_u32m8(uint32_t *rs1, ptrdiff_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m8(rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32mf2_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsse32_v_f32mf2_m(mask, base, bstride, value, vl);
+void test_vsse32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m1_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m1_m(mask, base, bstride, value, vl);
+void test_vsse32_v_f32m1_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m2_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m2_m(mask, base, bstride, value, vl);
+void test_vsse32_v_f32m2_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m4_m(vbool8_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m4_m(mask, base, bstride, value, vl);
+void test_vsse32_v_f32m4_m(vbool8_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_f32m8_m(vbool4_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) {
-  return __riscv_vsse32_v_f32m8_m(mask, base, bstride, value, vl);
+void test_vsse32_v_f32m8_m(vbool4_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m8_t vs3, size_t vl) {
+  return __riscv_vsse32_v_f32m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) {
-  return __riscv_vsse32_v_i32mf2_m(mask, base, bstride, value, vl);
+void test_vsse32_v_i32mf2_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m1_m(mask, base, bstride, value, vl);
+void test_vsse32_v_i32m1_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m2_m(mask, base, bstride, value, vl);
+void test_vsse32_v_i32m2_m(vbool16_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m4_m(mask, base, bstride, value, vl);
+void test_vsse32_v_i32m4_m(vbool8_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) {
-  return __riscv_vsse32_v_i32m8_m(mask, base, bstride, value, vl);
+void test_vsse32_v_i32m8_m(vbool4_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsse32_v_i32m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsse32_v_u32mf2_m(mask, base, bstride, value, vl);
+void test_vsse32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m1_m(mask, base, bstride, value, vl);
+void test_vsse32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m2_m(mask, base, bstride, value, vl);
+void test_vsse32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m4_m(mask, base, bstride, value, vl);
+void test_vsse32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) {
-  return __riscv_vsse32_v_u32m8_m(mask, base, bstride, value, vl);
+void test_vsse32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsse32_v_u32m8_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse32\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vsse64.c b/auto-generated/gnu-api-tests/vsse64.c
index d7a7f4d8a..d68cf5ac2 100644
--- a/auto-generated/gnu-api-tests/vsse64.c
+++ b/auto-generated/gnu-api-tests/vsse64.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsse64_v_f64m1(float64_t *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m1(base, bstride, value, vl);
+void test_vsse64_v_f64m1(float64_t *rs1, ptrdiff_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m1(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_f64m2(float64_t *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m2(base, bstride, value, vl);
+void test_vsse64_v_f64m2(float64_t *rs1, ptrdiff_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m2(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_f64m4(float64_t *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m4(base, bstride, value, vl);
+void test_vsse64_v_f64m4(float64_t *rs1, ptrdiff_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m4(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_f64m8(float64_t *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m8(base, bstride, value, vl);
+void test_vsse64_v_f64m8(float64_t *rs1, ptrdiff_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m8(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m1(base, bstride, value, vl);
+void test_vsse64_v_i64m1(int64_t *rs1, ptrdiff_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m1(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m2(base, bstride, value, vl);
+void test_vsse64_v_i64m2(int64_t *rs1, ptrdiff_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m2(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m4(base, bstride, value, vl);
+void test_vsse64_v_i64m4(int64_t *rs1, ptrdiff_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m4(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m8(base, bstride, value, vl);
+void test_vsse64_v_i64m8(int64_t *rs1, ptrdiff_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m8(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m1(base, bstride, value, vl);
+void test_vsse64_v_u64m1(uint64_t *rs1, ptrdiff_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m1(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m2(base, bstride, value, vl);
+void test_vsse64_v_u64m2(uint64_t *rs1, ptrdiff_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m2(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m4(base, bstride, value, vl);
+void test_vsse64_v_u64m4(uint64_t *rs1, ptrdiff_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m4(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m8(base, bstride, value, vl);
+void test_vsse64_v_u64m8(uint64_t *rs1, ptrdiff_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m8(rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_f64m1_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m1_m(mask, base, bstride, value, vl);
+void test_vsse64_v_f64m1_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_f64m2_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m2_m(mask, base, bstride, value, vl);
+void test_vsse64_v_f64m2_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_f64m4_m(vbool16_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m4_m(mask, base, bstride, value, vl);
+void test_vsse64_v_f64m4_m(vbool16_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_f64m8_m(vbool8_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsse64_v_f64m8_m(mask, base, bstride, value, vl);
+void test_vsse64_v_f64m8_m(vbool8_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsse64_v_f64m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m1_m(mask, base, bstride, value, vl);
+void test_vsse64_v_i64m1_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m2_m(mask, base, bstride, value, vl);
+void test_vsse64_v_i64m2_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m4_m(mask, base, bstride, value, vl);
+void test_vsse64_v_i64m4_m(vbool16_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) {
-  return __riscv_vsse64_v_i64m8_m(mask, base, bstride, value, vl);
+void test_vsse64_v_i64m8_m(vbool8_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsse64_v_i64m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m1_m(mask, base, bstride, value, vl);
+void test_vsse64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m2_m(mask, base, bstride, value, vl);
+void test_vsse64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m4_m(mask, base, bstride, value, vl);
+void test_vsse64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) {
-  return __riscv_vsse64_v_u64m8_m(mask, base, bstride, value, vl);
+void test_vsse64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsse64_v_u64m8_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse64\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vsse8.c b/auto-generated/gnu-api-tests/vsse8.c
index a166f2f77..c512cc498 100644
--- a/auto-generated/gnu-api-tests/vsse8.c
+++ b/auto-generated/gnu-api-tests/vsse8.c
@@ -6,116 +6,116 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) {
-  return __riscv_vsse8_v_i8mf8(base, bstride, value, vl);
+void test_vsse8_v_i8mf8(int8_t *rs1, ptrdiff_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8mf8(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) {
-  return __riscv_vsse8_v_i8mf4(base, bstride, value, vl);
+void test_vsse8_v_i8mf4(int8_t *rs1, ptrdiff_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8mf4(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) {
-  return __riscv_vsse8_v_i8mf2(base, bstride, value, vl);
+void test_vsse8_v_i8mf2(int8_t *rs1, ptrdiff_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8mf2(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m1(base, bstride, value, vl);
+void test_vsse8_v_i8m1(int8_t *rs1, ptrdiff_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m1(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m2(base, bstride, value, vl);
+void test_vsse8_v_i8m2(int8_t *rs1, ptrdiff_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m2(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m4(base, bstride, value, vl);
+void test_vsse8_v_i8m4(int8_t *rs1, ptrdiff_t rs2, vint8m4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m4(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m8(base, bstride, value, vl);
+void test_vsse8_v_i8m8(int8_t *rs1, ptrdiff_t rs2, vint8m8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m8(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsse8_v_u8mf8(base, bstride, value, vl);
+void test_vsse8_v_u8mf8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8mf8(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsse8_v_u8mf4(base, bstride, value, vl);
+void test_vsse8_v_u8mf4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8mf4(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsse8_v_u8mf2(base, bstride, value, vl);
+void test_vsse8_v_u8mf2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8mf2(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m1(base, bstride, value, vl);
+void test_vsse8_v_u8m1(uint8_t *rs1, ptrdiff_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m1(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m2(base, bstride, value, vl);
+void test_vsse8_v_u8m2(uint8_t *rs1, ptrdiff_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m2(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m4(base, bstride, value, vl);
+void test_vsse8_v_u8m4(uint8_t *rs1, ptrdiff_t rs2, vuint8m4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m4(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m8(base, bstride, value, vl);
+void test_vsse8_v_u8m8(uint8_t *rs1, ptrdiff_t rs2, vuint8m8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m8(rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) {
-  return __riscv_vsse8_v_i8mf8_m(mask, base, bstride, value, vl);
+void test_vsse8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8mf8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) {
-  return __riscv_vsse8_v_i8mf4_m(mask, base, bstride, value, vl);
+void test_vsse8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) {
-  return __riscv_vsse8_v_i8mf2_m(mask, base, bstride, value, vl);
+void test_vsse8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m1_m(mask, base, bstride, value, vl);
+void test_vsse8_v_i8m1_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m2_m(mask, base, bstride, value, vl);
+void test_vsse8_v_i8m2_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m4_m(mask, base, bstride, value, vl);
+void test_vsse8_v_i8m4_m(vbool2_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) {
-  return __riscv_vsse8_v_i8m8_m(mask, base, bstride, value, vl);
+void test_vsse8_v_i8m8_m(vbool1_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_i8m8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsse8_v_u8mf8_m(mask, base, bstride, value, vl);
+void test_vsse8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8mf8_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsse8_v_u8mf4_m(mask, base, bstride, value, vl);
+void test_vsse8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8mf4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsse8_v_u8mf2_m(mask, base, bstride, value, vl);
+void test_vsse8_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8mf2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m1_m(mask, base, bstride, value, vl);
+void test_vsse8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m1_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m2_m(mask, base, bstride, value, vl);
+void test_vsse8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m2_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m4_m(mask, base, bstride, value, vl);
+void test_vsse8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m4_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) {
-  return __riscv_vsse8_v_u8m8_m(mask, base, bstride, value, vl);
+void test_vsse8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m8_t vs3, size_t vl) {
+  return __riscv_vsse8_v_u8m8_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse8\.[ivxfswum.]+\s+} 28 } } */
diff --git a/auto-generated/gnu-api-tests/vsseg2e16.c b/auto-generated/gnu-api-tests/vsseg2e16.c
index dd8435858..f069eec1f 100644
--- a/auto-generated/gnu-api-tests/vsseg2e16.c
+++ b/auto-generated/gnu-api-tests/vsseg2e16.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsseg2e16_v_f16mf4x2(float16_t *base, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16mf4x2(base, v_tuple, vl);
+void test_vsseg2e16_v_f16mf4x2(float16_t *rs1, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16mf4x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16mf2x2(float16_t *base, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16mf2x2(base, v_tuple, vl);
+void test_vsseg2e16_v_f16mf2x2(float16_t *rs1, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16m1x2(float16_t *base, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16m1x2(base, v_tuple, vl);
+void test_vsseg2e16_v_f16m1x2(float16_t *rs1, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16m2x2(float16_t *base, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16m2x2(base, v_tuple, vl);
+void test_vsseg2e16_v_f16m2x2(float16_t *rs1, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16m4x2(float16_t *base, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16m4x2(base, v_tuple, vl);
+void test_vsseg2e16_v_f16m4x2(float16_t *rs1, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16mf4x2(int16_t *base, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16mf4x2(base, v_tuple, vl);
+void test_vsseg2e16_v_i16mf4x2(int16_t *rs1, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16mf4x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16mf2x2(int16_t *base, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16mf2x2(base, v_tuple, vl);
+void test_vsseg2e16_v_i16mf2x2(int16_t *rs1, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16m1x2(int16_t *base, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16m1x2(base, v_tuple, vl);
+void test_vsseg2e16_v_i16m1x2(int16_t *rs1, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16m2x2(int16_t *base, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16m2x2(base, v_tuple, vl);
+void test_vsseg2e16_v_i16m2x2(int16_t *rs1, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16m4x2(int16_t *base, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16m4x2(base, v_tuple, vl);
+void test_vsseg2e16_v_i16m4x2(int16_t *rs1, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16mf4x2(uint16_t *base, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16mf4x2(base, v_tuple, vl);
+void test_vsseg2e16_v_u16mf4x2(uint16_t *rs1, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16mf4x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16mf2x2(uint16_t *base, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16mf2x2(base, v_tuple, vl);
+void test_vsseg2e16_v_u16mf2x2(uint16_t *rs1, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16m1x2(uint16_t *base, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16m1x2(base, v_tuple, vl);
+void test_vsseg2e16_v_u16m1x2(uint16_t *rs1, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16m2x2(uint16_t *base, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16m2x2(base, v_tuple, vl);
+void test_vsseg2e16_v_u16m2x2(uint16_t *rs1, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16m4x2(uint16_t *base, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16m4x2(base, v_tuple, vl);
+void test_vsseg2e16_v_u16m4x2(uint16_t *rs1, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16mf4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16mf4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16m1x2_m(vbool16_t mask, float16_t *base, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16m2x2_m(vbool8_t mask, float16_t *base, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_f16m4x2_m(vbool4_t mask, float16_t *base, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_f16m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_f16m4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16mf4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16mf4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_i16m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_i16m4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16mf4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16mf4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e16_v_u16m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e16_v_u16m4x2_m(vm, rs1, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e16\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vsseg2e32.c b/auto-generated/gnu-api-tests/vsseg2e32.c
index 70c7f366b..a33b3f16e 100644
--- a/auto-generated/gnu-api-tests/vsseg2e32.c
+++ b/auto-generated/gnu-api-tests/vsseg2e32.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsseg2e32_v_f32mf2x2(float32_t *base, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32mf2x2(base, v_tuple, vl);
+void test_vsseg2e32_v_f32mf2x2(float32_t *rs1, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_f32m1x2(float32_t *base, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32m1x2(base, v_tuple, vl);
+void test_vsseg2e32_v_f32m1x2(float32_t *rs1, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_f32m2x2(float32_t *base, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32m2x2(base, v_tuple, vl);
+void test_vsseg2e32_v_f32m2x2(float32_t *rs1, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_f32m4x2(float32_t *base, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32m4x2(base, v_tuple, vl);
+void test_vsseg2e32_v_f32m4x2(float32_t *rs1, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32mf2x2(int32_t *base, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32mf2x2(base, v_tuple, vl);
+void test_vsseg2e32_v_i32mf2x2(int32_t *rs1, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32m1x2(int32_t *base, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32m1x2(base, v_tuple, vl);
+void test_vsseg2e32_v_i32m1x2(int32_t *rs1, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32m2x2(int32_t *base, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32m2x2(base, v_tuple, vl);
+void test_vsseg2e32_v_i32m2x2(int32_t *rs1, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32m4x2(int32_t *base, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32m4x2(base, v_tuple, vl);
+void test_vsseg2e32_v_i32m4x2(int32_t *rs1, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32mf2x2(uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32mf2x2(base, v_tuple, vl);
+void test_vsseg2e32_v_u32mf2x2(uint32_t *rs1, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32m1x2(uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32m1x2(base, v_tuple, vl);
+void test_vsseg2e32_v_u32m1x2(uint32_t *rs1, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32m2x2(uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32m2x2(base, v_tuple, vl);
+void test_vsseg2e32_v_u32m2x2(uint32_t *rs1, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32m4x2(uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32m4x2(base, v_tuple, vl);
+void test_vsseg2e32_v_u32m4x2(uint32_t *rs1, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_f32m1x2_m(vbool32_t mask, float32_t *base, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_f32m2x2_m(vbool16_t mask, float32_t *base, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_f32m4x2_m(vbool8_t mask, float32_t *base, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_f32m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_f32m4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_i32m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_i32m4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e32_v_u32m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e32_v_u32m4x2_m(vm, rs1, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vsseg2e64.c b/auto-generated/gnu-api-tests/vsseg2e64.c
index fe60f47e7..942fec118 100644
--- a/auto-generated/gnu-api-tests/vsseg2e64.c
+++ b/auto-generated/gnu-api-tests/vsseg2e64.c
@@ -6,76 +6,76 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsseg2e64_v_f64m1x2(float64_t *base, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_f64m1x2(base, v_tuple, vl);
+void test_vsseg2e64_v_f64m1x2(float64_t *rs1, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_f64m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_f64m2x2(float64_t *base, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_f64m2x2(base, v_tuple, vl);
+void test_vsseg2e64_v_f64m2x2(float64_t *rs1, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_f64m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_f64m4x2(float64_t *base, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_f64m4x2(base, v_tuple, vl);
+void test_vsseg2e64_v_f64m4x2(float64_t *rs1, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_f64m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_i64m1x2(int64_t *base, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_i64m1x2(base, v_tuple, vl);
+void test_vsseg2e64_v_i64m1x2(int64_t *rs1, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_i64m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_i64m2x2(int64_t *base, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_i64m2x2(base, v_tuple, vl);
+void test_vsseg2e64_v_i64m2x2(int64_t *rs1, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_i64m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_i64m4x2(int64_t *base, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_i64m4x2(base, v_tuple, vl);
+void test_vsseg2e64_v_i64m4x2(int64_t *rs1, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_i64m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_u64m1x2(uint64_t *base, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_u64m1x2(base, v_tuple, vl);
+void test_vsseg2e64_v_u64m1x2(uint64_t *rs1, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_u64m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_u64m2x2(uint64_t *base, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_u64m2x2(base, v_tuple, vl);
+void test_vsseg2e64_v_u64m2x2(uint64_t *rs1, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_u64m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_u64m4x2(uint64_t *base, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_u64m4x2(base, v_tuple, vl);
+void test_vsseg2e64_v_u64m4x2(uint64_t *rs1, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_u64m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e64_v_f64m1x2_m(vbool64_t mask, float64_t *base, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_f64m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_f64m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_f64m2x2_m(vbool32_t mask, float64_t *base, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_f64m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_f64m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_f64m4x2_m(vbool16_t mask, float64_t *base, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_f64m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_f64m4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_i64m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_i64m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_i64m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_i64m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_i64m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_i64m4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_u64m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_u64m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_u64m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_u64m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e64_v_u64m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e64_v_u64m4x2_m(vm, rs1, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e64\.[ivxfswum.]+\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vsseg2e8.c b/auto-generated/gnu-api-tests/vsseg2e8.c
index f5e15634c..aec1137a5 100644
--- a/auto-generated/gnu-api-tests/vsseg2e8.c
+++ b/auto-generated/gnu-api-tests/vsseg2e8.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsseg2e8_v_i8mf8x2(int8_t *base, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8mf8x2(base, v_tuple, vl);
+void test_vsseg2e8_v_i8mf8x2(int8_t *rs1, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8mf8x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8mf4x2(int8_t *base, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8mf4x2(base, v_tuple, vl);
+void test_vsseg2e8_v_i8mf4x2(int8_t *rs1, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8mf4x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8mf2x2(int8_t *base, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8mf2x2(base, v_tuple, vl);
+void test_vsseg2e8_v_i8mf2x2(int8_t *rs1, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8m1x2(int8_t *base, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8m1x2(base, v_tuple, vl);
+void test_vsseg2e8_v_i8m1x2(int8_t *rs1, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8m2x2(int8_t *base, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8m2x2(base, v_tuple, vl);
+void test_vsseg2e8_v_i8m2x2(int8_t *rs1, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8m4x2(int8_t *base, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8m4x2(base, v_tuple, vl);
+void test_vsseg2e8_v_i8m4x2(int8_t *rs1, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8mf8x2(uint8_t *base, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8mf8x2(base, v_tuple, vl);
+void test_vsseg2e8_v_u8mf8x2(uint8_t *rs1, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8mf8x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8mf4x2(uint8_t *base, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8mf4x2(base, v_tuple, vl);
+void test_vsseg2e8_v_u8mf4x2(uint8_t *rs1, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8mf4x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8mf2x2(uint8_t *base, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8mf2x2(base, v_tuple, vl);
+void test_vsseg2e8_v_u8mf2x2(uint8_t *rs1, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8mf2x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8m1x2(uint8_t *base, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8m1x2(base, v_tuple, vl);
+void test_vsseg2e8_v_u8m1x2(uint8_t *rs1, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8m1x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8m2x2(uint8_t *base, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8m2x2(base, v_tuple, vl);
+void test_vsseg2e8_v_u8m2x2(uint8_t *rs1, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8m2x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8m4x2(uint8_t *base, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8m4x2(base, v_tuple, vl);
+void test_vsseg2e8_v_u8m4x2(uint8_t *rs1, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8m4x2(rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8mf8x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8mf8x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8mf4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8mf4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_i8m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_i8m4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8mf8x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8mf8x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8mf4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8mf4x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8mf2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8mf2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8m1x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8m1x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8m2x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8m2x2_m(vm, rs1, vs3, vl);
 }

-void test_vsseg2e8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsseg2e8_v_u8m4x2_m(mask, base, v_tuple, vl);
+void test_vsseg2e8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsseg2e8_v_u8m4x2_m(vm, rs1, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e8\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vsseg3e16.c b/auto-generated/gnu-api-tests/vsseg3e16.c
index aa31a501c..216858473 100644
--- a/auto-generated/gnu-api-tests/vsseg3e16.c
+++ b/auto-generated/gnu-api-tests/vsseg3e16.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsseg3e16_v_f16mf4x3(float16_t *base, vfloat16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16mf4x3(base, v_tuple, vl);
+void test_vsseg3e16_v_f16mf4x3(float16_t *rs1, vfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16mf4x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_f16mf2x3(float16_t *base, vfloat16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16mf2x3(base, v_tuple, vl);
+void test_vsseg3e16_v_f16mf2x3(float16_t *rs1, vfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16mf2x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_f16m1x3(float16_t *base, vfloat16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16m1x3(base, v_tuple, vl);
+void test_vsseg3e16_v_f16m1x3(float16_t *rs1, vfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16m1x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_f16m2x3(float16_t *base, vfloat16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16m2x3(base, v_tuple, vl);
+void test_vsseg3e16_v_f16m2x3(float16_t *rs1, vfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16m2x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_i16mf4x3(int16_t *base, vint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_i16mf4x3(base, v_tuple, vl);
+void test_vsseg3e16_v_i16mf4x3(int16_t *rs1, vint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_i16mf4x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_i16mf2x3(int16_t *base, vint16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_i16mf2x3(base, v_tuple, vl);
+void test_vsseg3e16_v_i16mf2x3(int16_t *rs1, vint16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_i16mf2x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_i16m1x3(int16_t *base, vint16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_i16m1x3(base, v_tuple, vl);
+void test_vsseg3e16_v_i16m1x3(int16_t *rs1, vint16m1x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_i16m1x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_i16m2x3(int16_t *base, vint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_i16m2x3(base, v_tuple, vl);
+void test_vsseg3e16_v_i16m2x3(int16_t *rs1, vint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_i16m2x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_u16mf4x3(uint16_t *base, vuint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_u16mf4x3(base, v_tuple, vl);
+void test_vsseg3e16_v_u16mf4x3(uint16_t *rs1, vuint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_u16mf4x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_u16mf2x3(uint16_t *base, vuint16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_u16mf2x3(base, v_tuple, vl);
+void test_vsseg3e16_v_u16mf2x3(uint16_t *rs1, vuint16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_u16mf2x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_u16m1x3(uint16_t *base, vuint16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_u16m1x3(base, v_tuple, vl);
+void test_vsseg3e16_v_u16m1x3(uint16_t *rs1, vuint16m1x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_u16m1x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_u16m2x3(uint16_t *base, vuint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_u16m2x3(base, v_tuple, vl);
+void test_vsseg3e16_v_u16m2x3(uint16_t *rs1, vuint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_u16m2x3(rs1, vs3, vl);
 }

-void test_vsseg3e16_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vfloat16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16mf4x3_m(mask, base, v_tuple, vl);
+void test_vsseg3e16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16mf4x3_m(vm, rs1, vs3, vl);
 }

-void test_vsseg3e16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vfloat16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16mf2x3_m(mask, base, v_tuple, vl);
+void test_vsseg3e16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16mf2x3_m(vm, rs1, vs3, vl);
 }

-void test_vsseg3e16_v_f16m1x3_m(vbool16_t mask, float16_t *base, vfloat16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16m1x3_m(mask, base, v_tuple, vl);
+void test_vsseg3e16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16m1x3_m(vm, rs1, vs3, vl);
 }

-void test_vsseg3e16_v_f16m2x3_m(vbool8_t mask, float16_t *base, vfloat16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_f16m2x3_m(mask, base, v_tuple, vl);
+void test_vsseg3e16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vsseg3e16_v_f16m2x3_m(vm, rs1, vs3, vl);
 }

-void test_vsseg3e16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsseg3e16_v_i16mf4x3_m(mask, base, v_tuple, vl);
+void test_vsseg3e16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vint16mf4x3_t vs3, size_t vl) {
+  return
__riscv_vsseg3e16_v_i16mf4x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16_v_i16mf2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_i16mf2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16_v_i16m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_i16m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16_v_i16m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_i16m2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16_v_u16mf4x3_m(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_u16mf4x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16_v_u16mf2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_u16mf2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16_v_u16m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_u16m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16_v_u16m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_u16m2x3_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg3e32.c b/auto-generated/gnu-api-tests/vsseg3e32.c index fc61977cd..5c3f19768 100644 --- a/auto-generated/gnu-api-tests/vsseg3e32.c +++ b/auto-generated/gnu-api-tests/vsseg3e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg3e32_v_f32mf2x3(float32_t *base, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_f32mf2x3(base, v_tuple, vl); +void test_vsseg3e32_v_f32mf2x3(float32_t *rs1, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_f32mf2x3(rs1, vs3, vl); } -void test_vsseg3e32_v_f32m1x3(float32_t *base, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_f32m1x3(base, v_tuple, vl); +void test_vsseg3e32_v_f32m1x3(float32_t *rs1, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_f32m1x3(rs1, vs3, vl); } -void test_vsseg3e32_v_f32m2x3(float32_t *base, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_f32m2x3(base, v_tuple, vl); +void test_vsseg3e32_v_f32m2x3(float32_t *rs1, vfloat32m2x3_t vs3, size_t vl) { + 
return __riscv_vsseg3e32_v_f32m2x3(rs1, vs3, vl); } -void test_vsseg3e32_v_i32mf2x3(int32_t *base, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_i32mf2x3(base, v_tuple, vl); +void test_vsseg3e32_v_i32mf2x3(int32_t *rs1, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_i32mf2x3(rs1, vs3, vl); } -void test_vsseg3e32_v_i32m1x3(int32_t *base, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_i32m1x3(base, v_tuple, vl); +void test_vsseg3e32_v_i32m1x3(int32_t *rs1, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_i32m1x3(rs1, vs3, vl); } -void test_vsseg3e32_v_i32m2x3(int32_t *base, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_i32m2x3(base, v_tuple, vl); +void test_vsseg3e32_v_i32m2x3(int32_t *rs1, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_i32m2x3(rs1, vs3, vl); } -void test_vsseg3e32_v_u32mf2x3(uint32_t *base, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_u32mf2x3(base, v_tuple, vl); +void test_vsseg3e32_v_u32mf2x3(uint32_t *rs1, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_u32mf2x3(rs1, vs3, vl); } -void test_vsseg3e32_v_u32m1x3(uint32_t *base, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_u32m1x3(base, v_tuple, vl); +void test_vsseg3e32_v_u32m1x3(uint32_t *rs1, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_u32m1x3(rs1, vs3, vl); } -void test_vsseg3e32_v_u32m2x3(uint32_t *base, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_u32m2x3(base, v_tuple, vl); +void test_vsseg3e32_v_u32m2x3(uint32_t *rs1, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_u32m2x3(rs1, vs3, vl); } -void test_vsseg3e32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_f32mf2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_f32mf2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_f32m1x3_m(vbool32_t mask, float32_t *base, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_f32m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_f32m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_f32m2x3_m(vbool16_t mask, float32_t *base, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_f32m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_f32m2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_i32mf2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_i32mf2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_i32m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_i32m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_i32m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vint32m2x3_t vs3, size_t vl) 
{ + return __riscv_vsseg3e32_v_i32m2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_u32mf2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_u32mf2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_u32m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_u32m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32_v_u32m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32_v_u32m2x3_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg3e64.c b/auto-generated/gnu-api-tests/vsseg3e64.c index 8d07821b6..7f82bec72 100644 --- a/auto-generated/gnu-api-tests/vsseg3e64.c +++ b/auto-generated/gnu-api-tests/vsseg3e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg3e64_v_f64m1x3(float64_t *base, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_f64m1x3(base, v_tuple, vl); +void test_vsseg3e64_v_f64m1x3(float64_t *rs1, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_f64m1x3(rs1, vs3, vl); } -void test_vsseg3e64_v_f64m2x3(float64_t *base, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_f64m2x3(base, v_tuple, vl); +void test_vsseg3e64_v_f64m2x3(float64_t *rs1, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_f64m2x3(rs1, vs3, vl); } -void test_vsseg3e64_v_i64m1x3(int64_t *base, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_i64m1x3(base, v_tuple, vl); +void test_vsseg3e64_v_i64m1x3(int64_t *rs1, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_i64m1x3(rs1, vs3, vl); } -void test_vsseg3e64_v_i64m2x3(int64_t *base, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_i64m2x3(base, v_tuple, vl); +void test_vsseg3e64_v_i64m2x3(int64_t *rs1, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_i64m2x3(rs1, vs3, vl); } -void test_vsseg3e64_v_u64m1x3(uint64_t *base, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_u64m1x3(base, v_tuple, vl); +void test_vsseg3e64_v_u64m1x3(uint64_t *rs1, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_u64m1x3(rs1, vs3, vl); } -void test_vsseg3e64_v_u64m2x3(uint64_t *base, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_u64m2x3(base, v_tuple, vl); +void test_vsseg3e64_v_u64m2x3(uint64_t *rs1, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_u64m2x3(rs1, vs3, vl); } -void test_vsseg3e64_v_f64m1x3_m(vbool64_t mask, float64_t *base, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_f64m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_f64m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_f64m2x3_m(vbool32_t mask, float64_t *base, vfloat64m2x3_t v_tuple, 
size_t vl) { - return __riscv_vsseg3e64_v_f64m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_f64m2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_i64m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e64_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_i64m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_i64m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_i64m2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_u64m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_u64m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64_v_u64m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64_v_u64m2x3_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg3e8.c b/auto-generated/gnu-api-tests/vsseg3e8.c index f63ecab4e..629671650 100644 --- a/auto-generated/gnu-api-tests/vsseg3e8.c +++ b/auto-generated/gnu-api-tests/vsseg3e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg3e8_v_i8mf8x3(int8_t *base, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8mf8x3(base, v_tuple, vl); +void test_vsseg3e8_v_i8mf8x3(int8_t *rs1, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8mf8x3(rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf4x3(int8_t *base, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8mf4x3(base, v_tuple, vl); +void test_vsseg3e8_v_i8mf4x3(int8_t *rs1, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8mf4x3(rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf2x3(int8_t *base, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8mf2x3(base, v_tuple, vl); +void test_vsseg3e8_v_i8mf2x3(int8_t *rs1, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8mf2x3(rs1, vs3, vl); } -void test_vsseg3e8_v_i8m1x3(int8_t *base, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8m1x3(base, v_tuple, vl); +void test_vsseg3e8_v_i8m1x3(int8_t *rs1, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8m1x3(rs1, vs3, vl); } -void test_vsseg3e8_v_i8m2x3(int8_t *base, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8m2x3(base, v_tuple, vl); +void test_vsseg3e8_v_i8m2x3(int8_t *rs1, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8m2x3(rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf8x3(uint8_t *base, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8mf8x3(base, v_tuple, vl); +void test_vsseg3e8_v_u8mf8x3(uint8_t *rs1, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8mf8x3(rs1, vs3, vl); } -void 
test_vsseg3e8_v_u8mf4x3(uint8_t *base, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8mf4x3(base, v_tuple, vl); +void test_vsseg3e8_v_u8mf4x3(uint8_t *rs1, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8mf4x3(rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf2x3(uint8_t *base, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8mf2x3(base, v_tuple, vl); +void test_vsseg3e8_v_u8mf2x3(uint8_t *rs1, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8mf2x3(rs1, vs3, vl); } -void test_vsseg3e8_v_u8m1x3(uint8_t *base, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8m1x3(base, v_tuple, vl); +void test_vsseg3e8_v_u8m1x3(uint8_t *rs1, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8m1x3(rs1, vs3, vl); } -void test_vsseg3e8_v_u8m2x3(uint8_t *base, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8m2x3(base, v_tuple, vl); +void test_vsseg3e8_v_u8m2x3(uint8_t *rs1, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8m2x3(rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8mf8x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8mf8x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8mf4x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8mf4x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8mf2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8mf2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_i8m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_i8m2x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8mf8x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8mf8x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8mf4x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8mf4x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8mf2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8mf2x3_m(vm, rs1, vs3, vl); } -void 
test_vsseg3e8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8m1x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8m1x3_m(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8_v_u8m2x3_m(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8_v_u8m2x3_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg4e16.c b/auto-generated/gnu-api-tests/vsseg4e16.c index 30df996f5..3a80894b4 100644 --- a/auto-generated/gnu-api-tests/vsseg4e16.c +++ b/auto-generated/gnu-api-tests/vsseg4e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e16_v_f16mf4x4(float16_t *base, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16mf4x4(base, v_tuple, vl); +void test_vsseg4e16_v_f16mf4x4(float16_t *rs1, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16mf4x4(rs1, vs3, vl); } -void test_vsseg4e16_v_f16mf2x4(float16_t *base, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16mf2x4(base, v_tuple, vl); +void test_vsseg4e16_v_f16mf2x4(float16_t *rs1, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16mf2x4(rs1, vs3, vl); } -void test_vsseg4e16_v_f16m1x4(float16_t *base, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16m1x4(base, v_tuple, vl); +void test_vsseg4e16_v_f16m1x4(float16_t *rs1, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16m1x4(rs1, vs3, vl); } -void test_vsseg4e16_v_f16m2x4(float16_t *base, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16m2x4(base, v_tuple, vl); +void test_vsseg4e16_v_f16m2x4(float16_t *rs1, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16m2x4(rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf4x4(int16_t *base, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16mf4x4(base, v_tuple, vl); +void test_vsseg4e16_v_i16mf4x4(int16_t *rs1, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16mf4x4(rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf2x4(int16_t *base, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16mf2x4(base, v_tuple, vl); +void test_vsseg4e16_v_i16mf2x4(int16_t *rs1, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16mf2x4(rs1, vs3, vl); } -void test_vsseg4e16_v_i16m1x4(int16_t *base, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16m1x4(base, v_tuple, vl); +void test_vsseg4e16_v_i16m1x4(int16_t *rs1, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16m1x4(rs1, vs3, vl); } -void test_vsseg4e16_v_i16m2x4(int16_t *base, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16m2x4(base, v_tuple, vl); +void test_vsseg4e16_v_i16m2x4(int16_t *rs1, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16m2x4(rs1, vs3, vl); } -void test_vsseg4e16_v_u16mf4x4(uint16_t *base, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_u16mf4x4(base, v_tuple, vl); +void test_vsseg4e16_v_u16mf4x4(uint16_t *rs1, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16mf4x4(rs1, vs3, 
vl); } -void test_vsseg4e16_v_u16mf2x4(uint16_t *base, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_u16mf2x4(base, v_tuple, vl); +void test_vsseg4e16_v_u16mf2x4(uint16_t *rs1, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16mf2x4(rs1, vs3, vl); } -void test_vsseg4e16_v_u16m1x4(uint16_t *base, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_u16m1x4(base, v_tuple, vl); +void test_vsseg4e16_v_u16m1x4(uint16_t *rs1, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16m1x4(rs1, vs3, vl); } -void test_vsseg4e16_v_u16m2x4(uint16_t *base, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_u16m2x4(base, v_tuple, vl); +void test_vsseg4e16_v_u16m2x4(uint16_t *rs1, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16m2x4(rs1, vs3, vl); } -void test_vsseg4e16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16mf4x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16mf4x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_f16m1x4_m(vbool16_t mask, float16_t *base, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_f16m2x4_m(vbool8_t mask, float16_t *base, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_f16m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_f16m2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16mf4x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16mf4x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_i16m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_i16m2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4x4_t v_tuple, size_t vl) { - return 
__riscv_vsseg4e16_v_u16mf4x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16mf4x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_u16mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_u16m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16_v_u16m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16_v_u16m2x4_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg4e32.c b/auto-generated/gnu-api-tests/vsseg4e32.c index 10146234b..d915cd8e8 100644 --- a/auto-generated/gnu-api-tests/vsseg4e32.c +++ b/auto-generated/gnu-api-tests/vsseg4e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e32_v_f32mf2x4(float32_t *base, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_f32mf2x4(base, v_tuple, vl); +void test_vsseg4e32_v_f32mf2x4(float32_t *rs1, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_f32mf2x4(rs1, vs3, vl); } -void test_vsseg4e32_v_f32m1x4(float32_t *base, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_f32m1x4(base, v_tuple, vl); +void test_vsseg4e32_v_f32m1x4(float32_t *rs1, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_f32m1x4(rs1, vs3, vl); } -void test_vsseg4e32_v_f32m2x4(float32_t *base, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_f32m2x4(base, v_tuple, vl); +void test_vsseg4e32_v_f32m2x4(float32_t *rs1, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_f32m2x4(rs1, vs3, vl); } -void test_vsseg4e32_v_i32mf2x4(int32_t *base, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_i32mf2x4(base, v_tuple, vl); +void test_vsseg4e32_v_i32mf2x4(int32_t *rs1, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_i32mf2x4(rs1, vs3, vl); } -void test_vsseg4e32_v_i32m1x4(int32_t *base, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_i32m1x4(base, v_tuple, vl); +void test_vsseg4e32_v_i32m1x4(int32_t *rs1, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_i32m1x4(rs1, vs3, vl); } -void test_vsseg4e32_v_i32m2x4(int32_t *base, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_i32m2x4(base, v_tuple, vl); +void test_vsseg4e32_v_i32m2x4(int32_t *rs1, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_i32m2x4(rs1, vs3, vl); } -void test_vsseg4e32_v_u32mf2x4(uint32_t *base, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_u32mf2x4(base, v_tuple, vl); +void test_vsseg4e32_v_u32mf2x4(uint32_t *rs1, vuint32mf2x4_t vs3, size_t vl) { + return 
__riscv_vsseg4e32_v_u32mf2x4(rs1, vs3, vl); } -void test_vsseg4e32_v_u32m1x4(uint32_t *base, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_u32m1x4(base, v_tuple, vl); +void test_vsseg4e32_v_u32m1x4(uint32_t *rs1, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_u32m1x4(rs1, vs3, vl); } -void test_vsseg4e32_v_u32m2x4(uint32_t *base, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_u32m2x4(base, v_tuple, vl); +void test_vsseg4e32_v_u32m2x4(uint32_t *rs1, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_u32m2x4(rs1, vs3, vl); } -void test_vsseg4e32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_f32mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_f32mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_f32m1x4_m(vbool32_t mask, float32_t *base, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_f32m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_f32m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_f32m2x4_m(vbool16_t mask, float32_t *base, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_f32m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_f32m2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_i32mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_i32mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_i32m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_i32m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_i32m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_i32m2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_u32mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_u32mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_u32m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_u32m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32_v_u32m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32_v_u32m2x4_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg4e64.c b/auto-generated/gnu-api-tests/vsseg4e64.c index 1f2e22c96..6dac98882 100644 --- a/auto-generated/gnu-api-tests/vsseg4e64.c +++ b/auto-generated/gnu-api-tests/vsseg4e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e64_v_f64m1x4(float64_t *base, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_f64m1x4(base, v_tuple, vl); +void test_vsseg4e64_v_f64m1x4(float64_t *rs1, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_f64m1x4(rs1, vs3, vl); } -void test_vsseg4e64_v_f64m2x4(float64_t *base, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_f64m2x4(base, v_tuple, vl); +void test_vsseg4e64_v_f64m2x4(float64_t *rs1, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_f64m2x4(rs1, vs3, vl); } -void test_vsseg4e64_v_i64m1x4(int64_t *base, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_i64m1x4(base, v_tuple, vl); +void test_vsseg4e64_v_i64m1x4(int64_t *rs1, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_i64m1x4(rs1, vs3, vl); } -void test_vsseg4e64_v_i64m2x4(int64_t *base, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_i64m2x4(base, v_tuple, vl); +void test_vsseg4e64_v_i64m2x4(int64_t *rs1, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_i64m2x4(rs1, vs3, vl); } -void test_vsseg4e64_v_u64m1x4(uint64_t *base, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_u64m1x4(base, v_tuple, vl); +void test_vsseg4e64_v_u64m1x4(uint64_t *rs1, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_u64m1x4(rs1, vs3, vl); } -void test_vsseg4e64_v_u64m2x4(uint64_t *base, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_u64m2x4(base, v_tuple, vl); +void test_vsseg4e64_v_u64m2x4(uint64_t *rs1, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_u64m2x4(rs1, vs3, vl); } -void test_vsseg4e64_v_f64m1x4_m(vbool64_t mask, float64_t *base, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_f64m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_f64m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_f64m2x4_m(vbool32_t mask, float64_t *base, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_f64m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_f64m2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_i64m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_i64m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_i64m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_i64m2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_u64m1x4_m(mask, base, v_tuple, vl); +void 
test_vsseg4e64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_u64m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64_v_u64m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64_v_u64m2x4_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg4e8.c b/auto-generated/gnu-api-tests/vsseg4e8.c index 84d806c72..1e1334e22 100644 --- a/auto-generated/gnu-api-tests/vsseg4e8.c +++ b/auto-generated/gnu-api-tests/vsseg4e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e8_v_i8mf8x4(int8_t *base, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8mf8x4(base, v_tuple, vl); +void test_vsseg4e8_v_i8mf8x4(int8_t *rs1, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8mf8x4(rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf4x4(int8_t *base, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8mf4x4(base, v_tuple, vl); +void test_vsseg4e8_v_i8mf4x4(int8_t *rs1, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8mf4x4(rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf2x4(int8_t *base, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8mf2x4(base, v_tuple, vl); +void test_vsseg4e8_v_i8mf2x4(int8_t *rs1, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8mf2x4(rs1, vs3, vl); } -void test_vsseg4e8_v_i8m1x4(int8_t *base, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8m1x4(base, v_tuple, vl); +void test_vsseg4e8_v_i8m1x4(int8_t *rs1, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8m1x4(rs1, vs3, vl); } -void test_vsseg4e8_v_i8m2x4(int8_t *base, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8m2x4(base, v_tuple, vl); +void test_vsseg4e8_v_i8m2x4(int8_t *rs1, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8m2x4(rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf8x4(uint8_t *base, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8mf8x4(base, v_tuple, vl); +void test_vsseg4e8_v_u8mf8x4(uint8_t *rs1, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8mf8x4(rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf4x4(uint8_t *base, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8mf4x4(base, v_tuple, vl); +void test_vsseg4e8_v_u8mf4x4(uint8_t *rs1, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8mf4x4(rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf2x4(uint8_t *base, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8mf2x4(base, v_tuple, vl); +void test_vsseg4e8_v_u8mf2x4(uint8_t *rs1, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8mf2x4(rs1, vs3, vl); } -void test_vsseg4e8_v_u8m1x4(uint8_t *base, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8m1x4(base, v_tuple, vl); +void test_vsseg4e8_v_u8m1x4(uint8_t *rs1, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8m1x4(rs1, vs3, vl); } -void test_vsseg4e8_v_u8m2x4(uint8_t *base, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8m2x4(base, v_tuple, vl); +void test_vsseg4e8_v_u8m2x4(uint8_t *rs1, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8m2x4(rs1, vs3, vl); 
} -void test_vsseg4e8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8mf8x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8mf8x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8mf4x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8mf4x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_i8m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_i8m2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8mf8x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8mf8x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8mf4x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8mf4x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8mf2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8mf2x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8m1x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8m1x4_m(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8_v_u8m2x4_m(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8_v_u8m2x4_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg5e16.c b/auto-generated/gnu-api-tests/vsseg5e16.c index fc64af588..d64ea39fb 100644 --- a/auto-generated/gnu-api-tests/vsseg5e16.c +++ b/auto-generated/gnu-api-tests/vsseg5e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double 
float64_t; -void test_vsseg5e16_v_f16mf4x5(float16_t *base, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_f16mf4x5(base, v_tuple, vl); +void test_vsseg5e16_v_f16mf4x5(float16_t *rs1, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_f16mf4x5(rs1, vs3, vl); } -void test_vsseg5e16_v_f16mf2x5(float16_t *base, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_f16mf2x5(base, v_tuple, vl); +void test_vsseg5e16_v_f16mf2x5(float16_t *rs1, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_f16mf2x5(rs1, vs3, vl); } -void test_vsseg5e16_v_f16m1x5(float16_t *base, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_f16m1x5(base, v_tuple, vl); +void test_vsseg5e16_v_f16m1x5(float16_t *rs1, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_f16m1x5(rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf4x5(int16_t *base, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_i16mf4x5(base, v_tuple, vl); +void test_vsseg5e16_v_i16mf4x5(int16_t *rs1, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_i16mf4x5(rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf2x5(int16_t *base, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_i16mf2x5(base, v_tuple, vl); +void test_vsseg5e16_v_i16mf2x5(int16_t *rs1, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_i16mf2x5(rs1, vs3, vl); } -void test_vsseg5e16_v_i16m1x5(int16_t *base, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_i16m1x5(base, v_tuple, vl); +void test_vsseg5e16_v_i16m1x5(int16_t *rs1, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_i16m1x5(rs1, vs3, vl); } -void test_vsseg5e16_v_u16mf4x5(uint16_t *base, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_u16mf4x5(base, v_tuple, vl); +void test_vsseg5e16_v_u16mf4x5(uint16_t *rs1, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_u16mf4x5(rs1, vs3, vl); } -void test_vsseg5e16_v_u16mf2x5(uint16_t *base, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_u16mf2x5(base, v_tuple, vl); +void test_vsseg5e16_v_u16mf2x5(uint16_t *rs1, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_u16mf2x5(rs1, vs3, vl); } -void test_vsseg5e16_v_u16m1x5(uint16_t *base, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_u16m1x5(base, v_tuple, vl); +void test_vsseg5e16_v_u16m1x5(uint16_t *rs1, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_u16m1x5(rs1, vs3, vl); } -void test_vsseg5e16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_f16mf4x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_f16mf4x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_f16mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_f16mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_f16m1x5_m(vbool16_t mask, float16_t *base, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_f16m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_f16m1x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, 
vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_i16mf4x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_i16mf4x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_i16mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_i16mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_i16m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_i16m1x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_u16mf4x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_u16mf4x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_u16mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_u16mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16_v_u16m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16_v_u16m1x5_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg5e32.c b/auto-generated/gnu-api-tests/vsseg5e32.c index 42bd6d010..a902015ec 100644 --- a/auto-generated/gnu-api-tests/vsseg5e32.c +++ b/auto-generated/gnu-api-tests/vsseg5e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg5e32_v_f32mf2x5(float32_t *base, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_f32mf2x5(base, v_tuple, vl); +void test_vsseg5e32_v_f32mf2x5(float32_t *rs1, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_f32mf2x5(rs1, vs3, vl); } -void test_vsseg5e32_v_f32m1x5(float32_t *base, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_f32m1x5(base, v_tuple, vl); +void test_vsseg5e32_v_f32m1x5(float32_t *rs1, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_f32m1x5(rs1, vs3, vl); } -void test_vsseg5e32_v_i32mf2x5(int32_t *base, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_i32mf2x5(base, v_tuple, vl); +void test_vsseg5e32_v_i32mf2x5(int32_t *rs1, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_i32mf2x5(rs1, vs3, vl); } -void test_vsseg5e32_v_i32m1x5(int32_t *base, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_i32m1x5(base, v_tuple, vl); +void test_vsseg5e32_v_i32m1x5(int32_t *rs1, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_i32m1x5(rs1, vs3, vl); } -void test_vsseg5e32_v_u32mf2x5(uint32_t *base, vuint32mf2x5_t v_tuple, size_t vl) { - return 
__riscv_vsseg5e32_v_u32mf2x5(base, v_tuple, vl); +void test_vsseg5e32_v_u32mf2x5(uint32_t *rs1, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_u32mf2x5(rs1, vs3, vl); } -void test_vsseg5e32_v_u32m1x5(uint32_t *base, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_u32m1x5(base, v_tuple, vl); +void test_vsseg5e32_v_u32m1x5(uint32_t *rs1, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_u32m1x5(rs1, vs3, vl); } -void test_vsseg5e32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_f32mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_f32mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_f32m1x5_m(vbool32_t mask, float32_t *base, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_f32m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_f32m1x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_i32mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_i32mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_i32m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_i32m1x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_u32mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_u32mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32_v_u32m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32_v_u32m1x5_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg5e64.c b/auto-generated/gnu-api-tests/vsseg5e64.c index 6a8e6e684..21fa8abbe 100644 --- a/auto-generated/gnu-api-tests/vsseg5e64.c +++ b/auto-generated/gnu-api-tests/vsseg5e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg5e64_v_f64m1x5(float64_t *base, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64_v_f64m1x5(base, v_tuple, vl); +void test_vsseg5e64_v_f64m1x5(float64_t *rs1, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64_v_f64m1x5(rs1, vs3, vl); } -void test_vsseg5e64_v_i64m1x5(int64_t *base, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64_v_i64m1x5(base, v_tuple, vl); +void test_vsseg5e64_v_i64m1x5(int64_t *rs1, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64_v_i64m1x5(rs1, vs3, vl); } -void test_vsseg5e64_v_u64m1x5(uint64_t *base, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64_v_u64m1x5(base, v_tuple, vl); 
+void test_vsseg5e64_v_u64m1x5(uint64_t *rs1, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64_v_u64m1x5(rs1, vs3, vl); } -void test_vsseg5e64_v_f64m1x5_m(vbool64_t mask, float64_t *base, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64_v_f64m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64_v_f64m1x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64_v_i64m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64_v_i64m1x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64_v_u64m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64_v_u64m1x5_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg5e8.c b/auto-generated/gnu-api-tests/vsseg5e8.c index b2ef45789..6ee6b71c1 100644 --- a/auto-generated/gnu-api-tests/vsseg5e8.c +++ b/auto-generated/gnu-api-tests/vsseg5e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg5e8_v_i8mf8x5(int8_t *base, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8mf8x5(base, v_tuple, vl); +void test_vsseg5e8_v_i8mf8x5(int8_t *rs1, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8mf8x5(rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf4x5(int8_t *base, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8mf4x5(base, v_tuple, vl); +void test_vsseg5e8_v_i8mf4x5(int8_t *rs1, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8mf4x5(rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf2x5(int8_t *base, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8mf2x5(base, v_tuple, vl); +void test_vsseg5e8_v_i8mf2x5(int8_t *rs1, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8mf2x5(rs1, vs3, vl); } -void test_vsseg5e8_v_i8m1x5(int8_t *base, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8m1x5(base, v_tuple, vl); +void test_vsseg5e8_v_i8m1x5(int8_t *rs1, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8m1x5(rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf8x5(uint8_t *base, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8mf8x5(base, v_tuple, vl); +void test_vsseg5e8_v_u8mf8x5(uint8_t *rs1, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8mf8x5(rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf4x5(uint8_t *base, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8mf4x5(base, v_tuple, vl); +void test_vsseg5e8_v_u8mf4x5(uint8_t *rs1, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8mf4x5(rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf2x5(uint8_t *base, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8mf2x5(base, v_tuple, vl); +void test_vsseg5e8_v_u8mf2x5(uint8_t *rs1, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8mf2x5(rs1, vs3, vl); } -void test_vsseg5e8_v_u8m1x5(uint8_t *base, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8m1x5(base, v_tuple, vl); +void 
test_vsseg5e8_v_u8m1x5(uint8_t *rs1, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8m1x5(rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8mf8x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8mf8x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8mf4x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8mf4x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_i8m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_i8m1x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8mf8x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8mf8x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8mf4x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8mf4x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8mf2x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8mf2x5_m(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8_v_u8m1x5_m(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8_v_u8m1x5_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg6e16.c b/auto-generated/gnu-api-tests/vsseg6e16.c index 80e5dee1c..e3120b0a4 100644 --- a/auto-generated/gnu-api-tests/vsseg6e16.c +++ b/auto-generated/gnu-api-tests/vsseg6e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e16_v_f16mf4x6(float16_t *base, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_f16mf4x6(base, v_tuple, vl); +void test_vsseg6e16_v_f16mf4x6(float16_t *rs1, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_f16mf4x6(rs1, vs3, vl); } -void test_vsseg6e16_v_f16mf2x6(float16_t *base, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_f16mf2x6(base, v_tuple, vl); +void test_vsseg6e16_v_f16mf2x6(float16_t *rs1, 
vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_f16mf2x6(rs1, vs3, vl); } -void test_vsseg6e16_v_f16m1x6(float16_t *base, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_f16m1x6(base, v_tuple, vl); +void test_vsseg6e16_v_f16m1x6(float16_t *rs1, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_f16m1x6(rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf4x6(int16_t *base, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_i16mf4x6(base, v_tuple, vl); +void test_vsseg6e16_v_i16mf4x6(int16_t *rs1, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_i16mf4x6(rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf2x6(int16_t *base, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_i16mf2x6(base, v_tuple, vl); +void test_vsseg6e16_v_i16mf2x6(int16_t *rs1, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_i16mf2x6(rs1, vs3, vl); } -void test_vsseg6e16_v_i16m1x6(int16_t *base, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_i16m1x6(base, v_tuple, vl); +void test_vsseg6e16_v_i16m1x6(int16_t *rs1, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_i16m1x6(rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf4x6(uint16_t *base, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_u16mf4x6(base, v_tuple, vl); +void test_vsseg6e16_v_u16mf4x6(uint16_t *rs1, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_u16mf4x6(rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf2x6(uint16_t *base, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_u16mf2x6(base, v_tuple, vl); +void test_vsseg6e16_v_u16mf2x6(uint16_t *rs1, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_u16mf2x6(rs1, vs3, vl); } -void test_vsseg6e16_v_u16m1x6(uint16_t *base, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_u16m1x6(base, v_tuple, vl); +void test_vsseg6e16_v_u16m1x6(uint16_t *rs1, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_u16m1x6(rs1, vs3, vl); } -void test_vsseg6e16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_f16mf4x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_f16mf4x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_f16mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_f16mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_f16m1x6_m(vbool16_t mask, float16_t *base, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_f16m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_f16m1x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_i16mf4x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_i16mf4x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_i16mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, 
vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_i16mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_i16m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_i16m1x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_u16mf4x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_u16mf4x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_u16mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_u16mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16_v_u16m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16_v_u16m1x6_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg6e32.c b/auto-generated/gnu-api-tests/vsseg6e32.c index dbe0688c7..bcabc9794 100644 --- a/auto-generated/gnu-api-tests/vsseg6e32.c +++ b/auto-generated/gnu-api-tests/vsseg6e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e32_v_f32mf2x6(float32_t *base, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_f32mf2x6(base, v_tuple, vl); +void test_vsseg6e32_v_f32mf2x6(float32_t *rs1, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_f32mf2x6(rs1, vs3, vl); } -void test_vsseg6e32_v_f32m1x6(float32_t *base, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_f32m1x6(base, v_tuple, vl); +void test_vsseg6e32_v_f32m1x6(float32_t *rs1, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_f32m1x6(rs1, vs3, vl); } -void test_vsseg6e32_v_i32mf2x6(int32_t *base, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_i32mf2x6(base, v_tuple, vl); +void test_vsseg6e32_v_i32mf2x6(int32_t *rs1, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_i32mf2x6(rs1, vs3, vl); } -void test_vsseg6e32_v_i32m1x6(int32_t *base, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_i32m1x6(base, v_tuple, vl); +void test_vsseg6e32_v_i32m1x6(int32_t *rs1, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_i32m1x6(rs1, vs3, vl); } -void test_vsseg6e32_v_u32mf2x6(uint32_t *base, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_u32mf2x6(base, v_tuple, vl); +void test_vsseg6e32_v_u32mf2x6(uint32_t *rs1, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_u32mf2x6(rs1, vs3, vl); } -void test_vsseg6e32_v_u32m1x6(uint32_t *base, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_u32m1x6(base, v_tuple, vl); +void test_vsseg6e32_v_u32m1x6(uint32_t *rs1, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_u32m1x6(rs1, vs3, vl); } -void test_vsseg6e32_v_f32mf2x6_m(vbool64_t 
mask, float32_t *base, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_f32mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_f32mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_f32m1x6_m(vbool32_t mask, float32_t *base, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_f32m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_f32m1x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_i32mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_i32mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_i32m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_i32m1x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_u32mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_u32mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32_v_u32m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32_v_u32m1x6_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg6e64.c b/auto-generated/gnu-api-tests/vsseg6e64.c index 301a57e62..771bac55b 100644 --- a/auto-generated/gnu-api-tests/vsseg6e64.c +++ b/auto-generated/gnu-api-tests/vsseg6e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e64_v_f64m1x6(float64_t *base, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64_v_f64m1x6(base, v_tuple, vl); +void test_vsseg6e64_v_f64m1x6(float64_t *rs1, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64_v_f64m1x6(rs1, vs3, vl); } -void test_vsseg6e64_v_i64m1x6(int64_t *base, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64_v_i64m1x6(base, v_tuple, vl); +void test_vsseg6e64_v_i64m1x6(int64_t *rs1, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64_v_i64m1x6(rs1, vs3, vl); } -void test_vsseg6e64_v_u64m1x6(uint64_t *base, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64_v_u64m1x6(base, v_tuple, vl); +void test_vsseg6e64_v_u64m1x6(uint64_t *rs1, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64_v_u64m1x6(rs1, vs3, vl); } -void test_vsseg6e64_v_f64m1x6_m(vbool64_t mask, float64_t *base, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64_v_f64m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64_v_f64m1x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e64_v_i64m1x6_m(vbool64_t 
mask, int64_t *base, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64_v_i64m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64_v_i64m1x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64_v_u64m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64_v_u64m1x6_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg6e8.c b/auto-generated/gnu-api-tests/vsseg6e8.c index a4757d0bb..a75cccd7b 100644 --- a/auto-generated/gnu-api-tests/vsseg6e8.c +++ b/auto-generated/gnu-api-tests/vsseg6e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e8_v_i8mf8x6(int8_t *base, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8mf8x6(base, v_tuple, vl); +void test_vsseg6e8_v_i8mf8x6(int8_t *rs1, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8mf8x6(rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf4x6(int8_t *base, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8mf4x6(base, v_tuple, vl); +void test_vsseg6e8_v_i8mf4x6(int8_t *rs1, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8mf4x6(rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf2x6(int8_t *base, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8mf2x6(base, v_tuple, vl); +void test_vsseg6e8_v_i8mf2x6(int8_t *rs1, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8mf2x6(rs1, vs3, vl); } -void test_vsseg6e8_v_i8m1x6(int8_t *base, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8m1x6(base, v_tuple, vl); +void test_vsseg6e8_v_i8m1x6(int8_t *rs1, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8m1x6(rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf8x6(uint8_t *base, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8mf8x6(base, v_tuple, vl); +void test_vsseg6e8_v_u8mf8x6(uint8_t *rs1, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8mf8x6(rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf4x6(uint8_t *base, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8mf4x6(base, v_tuple, vl); +void test_vsseg6e8_v_u8mf4x6(uint8_t *rs1, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8mf4x6(rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf2x6(uint8_t *base, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8mf2x6(base, v_tuple, vl); +void test_vsseg6e8_v_u8mf2x6(uint8_t *rs1, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8mf2x6(rs1, vs3, vl); } -void test_vsseg6e8_v_u8m1x6(uint8_t *base, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8m1x6(base, v_tuple, vl); +void test_vsseg6e8_v_u8m1x6(uint8_t *rs1, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8m1x6(rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8mf8x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8mf8x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vint8mf4x6_t 
v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8mf4x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8mf4x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_i8m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_i8m1x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8mf8x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8mf8x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8mf4x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8mf4x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8mf2x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8mf2x6_m(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8_v_u8m1x6_m(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8_v_u8m1x6_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg7e16.c b/auto-generated/gnu-api-tests/vsseg7e16.c index 7e99993af..e7357bf6e 100644 --- a/auto-generated/gnu-api-tests/vsseg7e16.c +++ b/auto-generated/gnu-api-tests/vsseg7e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e16_v_f16mf4x7(float16_t *base, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_f16mf4x7(base, v_tuple, vl); +void test_vsseg7e16_v_f16mf4x7(float16_t *rs1, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_f16mf4x7(rs1, vs3, vl); } -void test_vsseg7e16_v_f16mf2x7(float16_t *base, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_f16mf2x7(base, v_tuple, vl); +void test_vsseg7e16_v_f16mf2x7(float16_t *rs1, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_f16mf2x7(rs1, vs3, vl); } -void test_vsseg7e16_v_f16m1x7(float16_t *base, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_f16m1x7(base, v_tuple, vl); +void test_vsseg7e16_v_f16m1x7(float16_t *rs1, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_f16m1x7(rs1, vs3, vl); } -void test_vsseg7e16_v_i16mf4x7(int16_t *base, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_i16mf4x7(base, v_tuple, vl); 
+void test_vsseg7e16_v_i16mf4x7(int16_t *rs1, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_i16mf4x7(rs1, vs3, vl); } -void test_vsseg7e16_v_i16mf2x7(int16_t *base, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_i16mf2x7(base, v_tuple, vl); +void test_vsseg7e16_v_i16mf2x7(int16_t *rs1, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_i16mf2x7(rs1, vs3, vl); } -void test_vsseg7e16_v_i16m1x7(int16_t *base, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_i16m1x7(base, v_tuple, vl); +void test_vsseg7e16_v_i16m1x7(int16_t *rs1, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_i16m1x7(rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf4x7(uint16_t *base, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_u16mf4x7(base, v_tuple, vl); +void test_vsseg7e16_v_u16mf4x7(uint16_t *rs1, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_u16mf4x7(rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf2x7(uint16_t *base, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_u16mf2x7(base, v_tuple, vl); +void test_vsseg7e16_v_u16mf2x7(uint16_t *rs1, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_u16mf2x7(rs1, vs3, vl); } -void test_vsseg7e16_v_u16m1x7(uint16_t *base, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_u16m1x7(base, v_tuple, vl); +void test_vsseg7e16_v_u16m1x7(uint16_t *rs1, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_u16m1x7(rs1, vs3, vl); } -void test_vsseg7e16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_f16mf4x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_f16mf4x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_f16mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_f16mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_f16m1x7_m(vbool16_t mask, float16_t *base, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_f16m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_f16m1x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_i16mf4x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_i16mf4x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_i16mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_i16mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_i16m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_i16m1x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4x7_t v_tuple, size_t vl) { - 
return __riscv_vsseg7e16_v_u16mf4x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_u16mf4x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_u16mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_u16mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16_v_u16m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16_v_u16m1x7_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg7e32.c b/auto-generated/gnu-api-tests/vsseg7e32.c index 83deb5be9..3f8448662 100644 --- a/auto-generated/gnu-api-tests/vsseg7e32.c +++ b/auto-generated/gnu-api-tests/vsseg7e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e32_v_f32mf2x7(float32_t *base, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_f32mf2x7(base, v_tuple, vl); +void test_vsseg7e32_v_f32mf2x7(float32_t *rs1, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_f32mf2x7(rs1, vs3, vl); } -void test_vsseg7e32_v_f32m1x7(float32_t *base, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_f32m1x7(base, v_tuple, vl); +void test_vsseg7e32_v_f32m1x7(float32_t *rs1, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_f32m1x7(rs1, vs3, vl); } -void test_vsseg7e32_v_i32mf2x7(int32_t *base, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_i32mf2x7(base, v_tuple, vl); +void test_vsseg7e32_v_i32mf2x7(int32_t *rs1, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_i32mf2x7(rs1, vs3, vl); } -void test_vsseg7e32_v_i32m1x7(int32_t *base, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_i32m1x7(base, v_tuple, vl); +void test_vsseg7e32_v_i32m1x7(int32_t *rs1, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_i32m1x7(rs1, vs3, vl); } -void test_vsseg7e32_v_u32mf2x7(uint32_t *base, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_u32mf2x7(base, v_tuple, vl); +void test_vsseg7e32_v_u32mf2x7(uint32_t *rs1, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_u32mf2x7(rs1, vs3, vl); } -void test_vsseg7e32_v_u32m1x7(uint32_t *base, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_u32m1x7(base, v_tuple, vl); +void test_vsseg7e32_v_u32m1x7(uint32_t *rs1, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_u32m1x7(rs1, vs3, vl); } -void test_vsseg7e32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_f32mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_f32mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_f32m1x7_m(vbool32_t mask, float32_t *base, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_f32m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, 
vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_f32m1x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_i32mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_i32mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_i32m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_i32m1x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_u32mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_u32mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32_v_u32m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32_v_u32m1x7_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg7e64.c b/auto-generated/gnu-api-tests/vsseg7e64.c index 01c2bb37b..f4a0b37db 100644 --- a/auto-generated/gnu-api-tests/vsseg7e64.c +++ b/auto-generated/gnu-api-tests/vsseg7e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e64_v_f64m1x7(float64_t *base, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64_v_f64m1x7(base, v_tuple, vl); +void test_vsseg7e64_v_f64m1x7(float64_t *rs1, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64_v_f64m1x7(rs1, vs3, vl); } -void test_vsseg7e64_v_i64m1x7(int64_t *base, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64_v_i64m1x7(base, v_tuple, vl); +void test_vsseg7e64_v_i64m1x7(int64_t *rs1, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64_v_i64m1x7(rs1, vs3, vl); } -void test_vsseg7e64_v_u64m1x7(uint64_t *base, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64_v_u64m1x7(base, v_tuple, vl); +void test_vsseg7e64_v_u64m1x7(uint64_t *rs1, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64_v_u64m1x7(rs1, vs3, vl); } -void test_vsseg7e64_v_f64m1x7_m(vbool64_t mask, float64_t *base, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64_v_f64m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64_v_f64m1x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64_v_i64m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64_v_i64m1x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64_v_u64m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint64m1x7_t 
vs3, size_t vl) { + return __riscv_vsseg7e64_v_u64m1x7_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg7e8.c b/auto-generated/gnu-api-tests/vsseg7e8.c index 2ef5e8801..f19eb1875 100644 --- a/auto-generated/gnu-api-tests/vsseg7e8.c +++ b/auto-generated/gnu-api-tests/vsseg7e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e8_v_i8mf8x7(int8_t *base, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8mf8x7(base, v_tuple, vl); +void test_vsseg7e8_v_i8mf8x7(int8_t *rs1, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_i8mf8x7(rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf4x7(int8_t *base, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8mf4x7(base, v_tuple, vl); +void test_vsseg7e8_v_i8mf4x7(int8_t *rs1, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_i8mf4x7(rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf2x7(int8_t *base, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8mf2x7(base, v_tuple, vl); +void test_vsseg7e8_v_i8mf2x7(int8_t *rs1, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_i8mf2x7(rs1, vs3, vl); } -void test_vsseg7e8_v_i8m1x7(int8_t *base, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8m1x7(base, v_tuple, vl); +void test_vsseg7e8_v_i8m1x7(int8_t *rs1, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_i8m1x7(rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf8x7(uint8_t *base, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8mf8x7(base, v_tuple, vl); +void test_vsseg7e8_v_u8mf8x7(uint8_t *rs1, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8mf8x7(rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf4x7(uint8_t *base, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8mf4x7(base, v_tuple, vl); +void test_vsseg7e8_v_u8mf4x7(uint8_t *rs1, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8mf4x7(rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf2x7(uint8_t *base, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8mf2x7(base, v_tuple, vl); +void test_vsseg7e8_v_u8mf2x7(uint8_t *rs1, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8mf2x7(rs1, vs3, vl); } -void test_vsseg7e8_v_u8m1x7(uint8_t *base, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8m1x7(base, v_tuple, vl); +void test_vsseg7e8_v_u8m1x7(uint8_t *rs1, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8m1x7(rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8mf8x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_i8mf8x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8mf4x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_i8mf4x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vint8mf2x7_t vs3, size_t vl) { + return 
__riscv_vsseg7e8_v_i8mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_i8m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_i8m1x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8mf8x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8mf8x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8mf4x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8mf4x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8mf2x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8mf2x7_m(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8_v_u8m1x7_m(mask, base, v_tuple, vl); +void test_vsseg7e8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8_v_u8m1x7_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg8e16.c b/auto-generated/gnu-api-tests/vsseg8e16.c index 6df5bdbd1..70b2ac3a0 100644 --- a/auto-generated/gnu-api-tests/vsseg8e16.c +++ b/auto-generated/gnu-api-tests/vsseg8e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e16_v_f16mf4x8(float16_t *base, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_f16mf4x8(base, v_tuple, vl); +void test_vsseg8e16_v_f16mf4x8(float16_t *rs1, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_f16mf4x8(rs1, vs3, vl); } -void test_vsseg8e16_v_f16mf2x8(float16_t *base, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_f16mf2x8(base, v_tuple, vl); +void test_vsseg8e16_v_f16mf2x8(float16_t *rs1, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_f16mf2x8(rs1, vs3, vl); } -void test_vsseg8e16_v_f16m1x8(float16_t *base, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_f16m1x8(base, v_tuple, vl); +void test_vsseg8e16_v_f16m1x8(float16_t *rs1, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_f16m1x8(rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf4x8(int16_t *base, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_i16mf4x8(base, v_tuple, vl); +void test_vsseg8e16_v_i16mf4x8(int16_t *rs1, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_i16mf4x8(rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf2x8(int16_t *base, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_i16mf2x8(base, v_tuple, vl); +void test_vsseg8e16_v_i16mf2x8(int16_t *rs1, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_i16mf2x8(rs1, vs3, vl); } -void test_vsseg8e16_v_i16m1x8(int16_t *base, vint16m1x8_t v_tuple, size_t vl) { - return 
__riscv_vsseg8e16_v_i16m1x8(base, v_tuple, vl); +void test_vsseg8e16_v_i16m1x8(int16_t *rs1, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_i16m1x8(rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf4x8(uint16_t *base, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_u16mf4x8(base, v_tuple, vl); +void test_vsseg8e16_v_u16mf4x8(uint16_t *rs1, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_u16mf4x8(rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf2x8(uint16_t *base, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_u16mf2x8(base, v_tuple, vl); +void test_vsseg8e16_v_u16mf2x8(uint16_t *rs1, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_u16mf2x8(rs1, vs3, vl); } -void test_vsseg8e16_v_u16m1x8(uint16_t *base, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_u16m1x8(base, v_tuple, vl); +void test_vsseg8e16_v_u16m1x8(uint16_t *rs1, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_u16m1x8(rs1, vs3, vl); } -void test_vsseg8e16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_f16mf4x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_f16mf4x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_f16mf2x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_f16mf2x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_f16m1x8_m(vbool16_t mask, float16_t *base, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_f16m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_f16m1x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_i16mf4x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_i16mf4x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_i16mf2x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_i16mf2x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_i16m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_i16m1x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_u16mf4x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_u16mf4x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_u16mf2x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x8_t vs3, size_t vl) { + return 
__riscv_vsseg8e16_v_u16mf2x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16_v_u16m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16_v_u16m1x8_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg8e32.c b/auto-generated/gnu-api-tests/vsseg8e32.c index cd99ec2a7..d0fabd59f 100644 --- a/auto-generated/gnu-api-tests/vsseg8e32.c +++ b/auto-generated/gnu-api-tests/vsseg8e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e32_v_f32mf2x8(float32_t *base, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_f32mf2x8(base, v_tuple, vl); +void test_vsseg8e32_v_f32mf2x8(float32_t *rs1, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_f32mf2x8(rs1, vs3, vl); } -void test_vsseg8e32_v_f32m1x8(float32_t *base, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_f32m1x8(base, v_tuple, vl); +void test_vsseg8e32_v_f32m1x8(float32_t *rs1, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_f32m1x8(rs1, vs3, vl); } -void test_vsseg8e32_v_i32mf2x8(int32_t *base, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_i32mf2x8(base, v_tuple, vl); +void test_vsseg8e32_v_i32mf2x8(int32_t *rs1, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_i32mf2x8(rs1, vs3, vl); } -void test_vsseg8e32_v_i32m1x8(int32_t *base, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_i32m1x8(base, v_tuple, vl); +void test_vsseg8e32_v_i32m1x8(int32_t *rs1, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_i32m1x8(rs1, vs3, vl); } -void test_vsseg8e32_v_u32mf2x8(uint32_t *base, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_u32mf2x8(base, v_tuple, vl); +void test_vsseg8e32_v_u32mf2x8(uint32_t *rs1, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_u32mf2x8(rs1, vs3, vl); } -void test_vsseg8e32_v_u32m1x8(uint32_t *base, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_u32m1x8(base, v_tuple, vl); +void test_vsseg8e32_v_u32m1x8(uint32_t *rs1, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_u32m1x8(rs1, vs3, vl); } -void test_vsseg8e32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_f32mf2x8_m(mask, base, v_tuple, vl); +void test_vsseg8e32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_f32mf2x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_f32m1x8_m(vbool32_t mask, float32_t *base, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_f32m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_f32m1x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_i32mf2x8_m(mask, base, v_tuple, vl); +void test_vsseg8e32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_i32mf2x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vint32m1x8_t 
v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_i32m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_i32m1x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_u32mf2x8_m(mask, base, v_tuple, vl); +void test_vsseg8e32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_u32mf2x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32_v_u32m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32_v_u32m1x8_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg8e64.c b/auto-generated/gnu-api-tests/vsseg8e64.c index a846edb44..47b430cd0 100644 --- a/auto-generated/gnu-api-tests/vsseg8e64.c +++ b/auto-generated/gnu-api-tests/vsseg8e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e64_v_f64m1x8(float64_t *base, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64_v_f64m1x8(base, v_tuple, vl); +void test_vsseg8e64_v_f64m1x8(float64_t *rs1, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64_v_f64m1x8(rs1, vs3, vl); } -void test_vsseg8e64_v_i64m1x8(int64_t *base, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64_v_i64m1x8(base, v_tuple, vl); +void test_vsseg8e64_v_i64m1x8(int64_t *rs1, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64_v_i64m1x8(rs1, vs3, vl); } -void test_vsseg8e64_v_u64m1x8(uint64_t *base, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64_v_u64m1x8(base, v_tuple, vl); +void test_vsseg8e64_v_u64m1x8(uint64_t *rs1, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64_v_u64m1x8(rs1, vs3, vl); } -void test_vsseg8e64_v_f64m1x8_m(vbool64_t mask, float64_t *base, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64_v_f64m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64_v_f64m1x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64_v_i64m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64_v_i64m1x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64_v_u64m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64_v_u64m1x8_m(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vsseg8e8.c b/auto-generated/gnu-api-tests/vsseg8e8.c index 10506ff39..397df4941 100644 --- a/auto-generated/gnu-api-tests/vsseg8e8.c +++ b/auto-generated/gnu-api-tests/vsseg8e8.c @@ -6,68 +6,68 @@ typedef _Float16 
float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e8_v_i8mf8x8(int8_t *base, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8mf8x8(base, v_tuple, vl); +void test_vsseg8e8_v_i8mf8x8(int8_t *rs1, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8mf8x8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf4x8(int8_t *base, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8mf4x8(base, v_tuple, vl); +void test_vsseg8e8_v_i8mf4x8(int8_t *rs1, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8mf4x8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf2x8(int8_t *base, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8mf2x8(base, v_tuple, vl); +void test_vsseg8e8_v_i8mf2x8(int8_t *rs1, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8mf2x8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8m1x8(int8_t *base, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8m1x8(base, v_tuple, vl); +void test_vsseg8e8_v_i8m1x8(int8_t *rs1, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8m1x8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf8x8(uint8_t *base, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_u8mf8x8(base, v_tuple, vl); +void test_vsseg8e8_v_u8mf8x8(uint8_t *rs1, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_u8mf8x8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf4x8(uint8_t *base, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_u8mf4x8(base, v_tuple, vl); +void test_vsseg8e8_v_u8mf4x8(uint8_t *rs1, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_u8mf4x8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf2x8(uint8_t *base, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_u8mf2x8(base, v_tuple, vl); +void test_vsseg8e8_v_u8mf2x8(uint8_t *rs1, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_u8mf2x8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8m1x8(uint8_t *base, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_u8m1x8(base, v_tuple, vl); +void test_vsseg8e8_v_u8m1x8(uint8_t *rs1, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_u8m1x8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8mf8x8_m(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8mf8x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8mf4x8_m(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8mf4x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8mf2x8_m(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8mf2x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_i8m1x8_m(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e8_v_i8m1x8_m(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8_v_u8mf8x8_m(mask, base, v_tuple, 
vl);
+void test_vsseg8e8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e8_v_u8mf8x8_m(vm, rs1, vs3, vl);
 }
 
-void test_vsseg8e8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsseg8e8_v_u8mf4x8_m(mask, base, v_tuple, vl);
+void test_vsseg8e8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e8_v_u8mf4x8_m(vm, rs1, vs3, vl);
 }
 
-void test_vsseg8e8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsseg8e8_v_u8mf2x8_m(mask, base, v_tuple, vl);
+void test_vsseg8e8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e8_v_u8mf2x8_m(vm, rs1, vs3, vl);
 }
 
-void test_vsseg8e8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsseg8e8_v_u8m1x8_m(mask, base, v_tuple, vl);
+void test_vsseg8e8_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e8_v_u8m1x8_m(vm, rs1, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e8\.[ivxfswum.]+\s+} 16 } } */
diff --git a/auto-generated/gnu-api-tests/vssra.c b/auto-generated/gnu-api-tests/vssra.c
index ca7543c60..f2ee29fac 100644
--- a/auto-generated/gnu-api-tests/vssra.c
+++ b/auto-generated/gnu-api-tests/vssra.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vssra_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vssra_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vssra_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vssra_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vssra_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vssra_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vssra_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vssra_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vssra_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vssra_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vssra_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vssra_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vssra_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vssra_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vssra_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vssra_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vssra_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vssra_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vssra_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vssra_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vssra_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vssra_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vssra_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vssra_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vssra_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vssra_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vssra_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vssra_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vssra_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vssra_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vssra_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vssra_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vssra_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vssra_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vssra_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i8m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vssra_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i8m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vssra_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vssra_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vssra_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vssra_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vssra_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vssra_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vssra_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i16m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vssra_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i16m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vssra_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vssra_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vssra_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vssra_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vssra_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vssra_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vssra_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i32m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vssra_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vssra_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vssra_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vssra_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vssra_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssra\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vssrl.c b/auto-generated/gnu-api-tests/vssrl.c
index a220622bc..920c91082 100644
--- a/auto-generated/gnu-api-tests/vssrl.c
+++ b/auto-generated/gnu-api-tests/vssrl.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16mf4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16mf4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16mf4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32mf2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32mf2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32mf2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u64m1(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m1(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u64m1(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u64m2(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m2(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u64m2(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u64m4(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m4(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u64m4(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u64m8(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m8(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u64m8(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16mf4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16mf4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16mf4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u16m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u16m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32mf2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32mf2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32mf2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u32m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u32m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u32m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u32m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u32m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u32m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u64m1_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m1_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u64m1_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u64m2_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m2_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u64m2_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u64m4_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m4_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u64m4_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vssrl_vv_u64m8_m(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m8_m(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) {
  return __riscv_vssrl_vx_u64m8_m(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssrl\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg2e16.c b/auto-generated/gnu-api-tests/vssseg2e16.c
index 9052baf34..bb9b7f8a7 100644
--- a/auto-generated/gnu-api-tests/vssseg2e16.c
+++ b/auto-generated/gnu-api-tests/vssseg2e16.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vssseg2e16_v_f16mf4x2(float16_t *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16mf4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16mf4x2(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16mf4x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16mf2x2(float16_t *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16mf2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16mf2x2(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16mf2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16m1x2(float16_t *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16m1x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16m1x2(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16m1x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16m2x2(float16_t *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16m2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16m2x2(float16_t *rs1, ptrdiff_t rs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16m2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16m4x2(float16_t *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16m4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16m4x2(float16_t *rs1, ptrdiff_t rs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16m4x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16mf4x2(int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16mf4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16mf4x2(int16_t *rs1, ptrdiff_t rs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_i16mf4x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16mf2x2(int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16mf2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16mf2x2(int16_t *rs1, ptrdiff_t rs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_i16mf2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16m1x2(int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16m1x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16m1x2(int16_t *rs1, ptrdiff_t rs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_i16m1x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16m2x2(int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16m2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16m2x2(int16_t *rs1, ptrdiff_t rs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_i16m2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16m4x2(int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16m4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16m4x2(int16_t *rs1, ptrdiff_t rs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_i16m4x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16mf4x2(uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16mf4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16mf4x2(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_u16mf4x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16mf2x2(uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16mf2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16mf2x2(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_u16mf2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16m1x2(uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16m1x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16m1x2(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_u16m1x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16m2x2(uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16m2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16m2x2(uint16_t *rs1, ptrdiff_t rs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_u16m2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16m4x2(uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16m4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16m4x2(uint16_t *rs1, ptrdiff_t rs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_u16m4x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16mf4x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16mf4x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16mf2x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16mf2x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16m1x2_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16m1x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_f16m1x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16m2x2_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16m2x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_f16m2x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_f16m4x2_m(vbool4_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_f16m4x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m4x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_f16m4x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16mf4x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_i16mf4x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16mf2x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_i16mf2x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16m1x2_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16m1x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_i16m1x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16m2x2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16m2x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_i16m2x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_i16m4x2_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_i16m4x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m4x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_i16m4x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16mf4x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_u16mf4x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16mf2x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_u16mf2x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16m1x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_u16m1x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16m2x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_u16m2x2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e16_v_u16m4x2_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg2e16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m4x2_t vs3, size_t vl) {
  return __riscv_vssseg2e16_v_u16m4x2_m(vm, rs1, rs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e16\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg2e32.c b/auto-generated/gnu-api-tests/vssseg2e32.c
index 3bc49d8ce..b1d2a3286 100644
--- a/auto-generated/gnu-api-tests/vssseg2e32.c
+++ b/auto-generated/gnu-api-tests/vssseg2e32.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vssseg2e32_v_f32mf2x2(float32_t *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_f32mf2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_f32mf2x2(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e32_v_f32mf2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e32_v_f32m1x2(float32_t *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_f32m1x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_f32m1x2(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e32_v_f32m1x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e32_v_f32m2x2(float32_t *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_f32m2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_f32m2x2(float32_t *rs1, ptrdiff_t rs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e32_v_f32m2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e32_v_f32m4x2(float32_t *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_f32m4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_f32m4x2(float32_t *rs1, ptrdiff_t rs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e32_v_f32m4x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_i32mf2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_i32mf2x2(int32_t *rs1, ptrdiff_t rs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e32_v_i32mf2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m1x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_i32m1x2(int32_t *rs1, ptrdiff_t rs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e32_v_i32m1x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m2x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_i32m2x2(int32_t *rs1, ptrdiff_t rs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e32_v_i32m2x2(rs1, rs2, vs3, vl);
 }
 
-void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vssseg2e32_v_i32m4x2(base, bstride, v_tuple, vl);
+void test_vssseg2e32_v_i32m4x2(int32_t *rs1, ptrdiff_t
rs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_i32m4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32mf2x2(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32mf2x2(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32mf2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32m1x2(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m1x2(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32m1x2(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32m2x2(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m2x2(uint32_t *rs1, ptrdiff_t rs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32m2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32m4x2(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m4x2(uint32_t *rs1, ptrdiff_t rs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32m4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_f32mf2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_f32mf2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_f32m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_f32m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_f32m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_f32m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_f32m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_f32m4x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_i32mf2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_i32mf2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_i32m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, 
vint32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_i32m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_i32m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_i32m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_i32m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_i32m4x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32mf2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32mf2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32_v_u32m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32_v_u32m4x2_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg2e64.c b/auto-generated/gnu-api-tests/vssseg2e64.c index 53aa10f51..4f2bded00 100644 --- a/auto-generated/gnu-api-tests/vssseg2e64.c +++ b/auto-generated/gnu-api-tests/vssseg2e64.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg2e64_v_f64m1x2(float64_t *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_f64m1x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m1x2(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_f64m1x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m2x2(float64_t *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_f64m2x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m2x2(float64_t *rs1, ptrdiff_t rs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_f64m2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m4x2(float64_t *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, 
size_t vl) { - return __riscv_vssseg2e64_v_f64m4x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m4x2(float64_t *rs1, ptrdiff_t rs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_f64m4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m1x2(int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_i64m1x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m1x2(int64_t *rs1, ptrdiff_t rs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_i64m1x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m2x2(int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_i64m2x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m2x2(int64_t *rs1, ptrdiff_t rs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_i64m2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m4x2(int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_i64m4x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m4x2(int64_t *rs1, ptrdiff_t rs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_i64m4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m1x2(uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_u64m1x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m1x2(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_u64m1x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m2x2(uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_u64m2x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m2x2(uint64_t *rs1, ptrdiff_t rs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_u64m2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m4x2(uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_u64m4x2(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m4x2(uint64_t *rs1, ptrdiff_t rs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_u64m4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m1x2_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_f64m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_f64m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m2x2_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_f64m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_f64m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m4x2_m(vbool16_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_f64m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_f64m4x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m1x2_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_i64m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, 
ptrdiff_t rs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_i64m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m2x2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_i64m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_i64m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m4x2_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_i64m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_i64m4x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_u64m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_u64m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_u64m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_u64m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64_v_u64m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64_v_u64m4x2_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e64\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg2e8.c b/auto-generated/gnu-api-tests/vssseg2e8.c index 9023a6507..b62221695 100644 --- a/auto-generated/gnu-api-tests/vssseg2e8.c +++ b/auto-generated/gnu-api-tests/vssseg2e8.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg2e8_v_i8mf8x2(int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8mf8x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf8x2(int8_t *rs1, ptrdiff_t rs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8mf8x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf4x2(int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8mf4x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf4x2(int8_t *rs1, ptrdiff_t rs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8mf4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf2x2(int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8mf2x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf2x2(int8_t *rs1, ptrdiff_t rs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8mf2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m1x2(int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8m1x2(base, bstride, v_tuple, vl); +void 
test_vssseg2e8_v_i8m1x2(int8_t *rs1, ptrdiff_t rs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8m1x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m2x2(int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8m2x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m2x2(int8_t *rs1, ptrdiff_t rs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8m2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m4x2(int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8m4x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m4x2(int8_t *rs1, ptrdiff_t rs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8m4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf8x2(uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8mf8x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf8x2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8mf8x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf4x2(uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8mf4x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf4x2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8mf4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf2x2(uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8mf2x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf2x2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8mf2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m1x2(uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8m1x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m1x2(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8m1x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m2x2(uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8m2x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m2x2(uint8_t *rs1, ptrdiff_t rs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8m2x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m4x2(uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8m4x2(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m4x2(uint8_t *rs1, ptrdiff_t rs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8m4x2(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8mf8x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8mf8x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8mf4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8mf4x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8mf2x2_m(mask, base, bstride, 
v_tuple, vl); +void test_vssseg2e8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8mf2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m1x2_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m2x2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m4x2_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_i8m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_i8m4x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8mf8x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8mf8x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8mf4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8mf4x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8mf2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8mf2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8m1x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8m1x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8m2x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8m2x2_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8_v_u8m4x2_m(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8_v_u8m4x2_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e8\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg3e16.c b/auto-generated/gnu-api-tests/vssseg3e16.c index 2ab3a1bb7..c5543c753 100644 --- a/auto-generated/gnu-api-tests/vssseg3e16.c +++ b/auto-generated/gnu-api-tests/vssseg3e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg3e16_v_f16mf4x3(float16_t *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16mf4x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16mf4x3(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16mf4x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16mf2x3(float16_t *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16mf2x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16mf2x3(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16mf2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m1x3(float16_t *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m1x3(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m2x3(float16_t *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m2x3(float16_t *rs1, ptrdiff_t rs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf4x3(int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_i16mf4x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf4x3(int16_t *rs1, ptrdiff_t rs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16mf4x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf2x3(int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_i16mf2x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf2x3(int16_t *rs1, ptrdiff_t rs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16mf2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m1x3(int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_i16m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m1x3(int16_t *rs1, ptrdiff_t rs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m2x3(int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_i16m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m2x3(int16_t *rs1, ptrdiff_t rs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf4x3(uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16mf4x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16mf4x3(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16mf4x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf2x3(uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16mf2x3(base, bstride, 
v_tuple, vl); +void test_vssseg3e16_v_u16mf2x3(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16mf2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m1x3(uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m1x3(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m2x3(uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m2x3(uint16_t *rs1, ptrdiff_t rs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16mf4x3_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16mf4x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16mf4x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16mf2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16mf2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m1x3_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16m1x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16m1x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m2x3_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_f16m2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_f16m2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_i16mf4x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16mf4x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_i16mf2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16mf2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m1x3_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_i16m1x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16m1x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m2x3_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, 
size_t vl) { - return __riscv_vssseg3e16_v_i16m2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_i16m2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16mf4x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16mf4x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16mf2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16mf2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16m1x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16m1x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16_v_u16m2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16_v_u16m2x3_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg3e32.c b/auto-generated/gnu-api-tests/vssseg3e32.c index 243681300..e75dd0cdc 100644 --- a/auto-generated/gnu-api-tests/vssseg3e32.c +++ b/auto-generated/gnu-api-tests/vssseg3e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg3e32_v_f32mf2x3(float32_t *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_f32mf2x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32mf2x3(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_f32mf2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m1x3(float32_t *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_f32m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m1x3(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_f32m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m2x3(float32_t *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_f32m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m2x3(float32_t *rs1, ptrdiff_t rs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_f32m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32mf2x3(int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_i32mf2x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32mf2x3(int32_t *rs1, ptrdiff_t rs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_i32mf2x3(rs1, rs2, vs3, vl); 
} -void test_vssseg3e32_v_i32m1x3(int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_i32m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32m1x3(int32_t *rs1, ptrdiff_t rs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_i32m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32m2x3(int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_i32m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32m2x3(int32_t *rs1, ptrdiff_t rs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_i32m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32mf2x3(uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_u32mf2x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32mf2x3(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_u32mf2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32m1x3(uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_u32m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32m1x3(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_u32m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32m2x3(uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_u32m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32m2x3(uint32_t *rs1, ptrdiff_t rs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_u32m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_f32mf2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_f32mf2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m1x3_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_f32m1x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_f32m1x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m2x3_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_f32m2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_f32m2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_i32mf2x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_i32mf2x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32m1x3_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32_v_i32m1x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32_v_i32m1x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32m2x3_m(vbool16_t mask, int32_t 
*base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e32_v_i32m2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e32_v_i32m2x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e32_v_u32mf2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e32_v_u32mf2x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e32_v_u32m1x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e32_v_u32m1x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e32_v_u32m2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e32_v_u32m2x3_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e32\.[ivxfswum.]+\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg3e64.c b/auto-generated/gnu-api-tests/vssseg3e64.c
index 26da49317..0a8b8335c 100644
--- a/auto-generated/gnu-api-tests/vssseg3e64.c
+++ b/auto-generated/gnu-api-tests/vssseg3e64.c
@@ -6,52 +6,52 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg3e64_v_f64m1x3(float64_t *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_f64m1x3(base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_f64m1x3(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_f64m1x3(rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_f64m2x3(float64_t *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_f64m2x3(base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_f64m2x3(float64_t *rs1, ptrdiff_t rs2, vfloat64m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_f64m2x3(rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_i64m1x3(int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_i64m1x3(base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_i64m1x3(int64_t *rs1, ptrdiff_t rs2, vint64m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_i64m1x3(rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_i64m2x3(int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_i64m2x3(base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_i64m2x3(int64_t *rs1, ptrdiff_t rs2, vint64m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_i64m2x3(rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_u64m1x3(uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_u64m1x3(base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_u64m1x3(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_u64m1x3(rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_u64m2x3(uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_u64m2x3(base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_u64m2x3(uint64_t *rs1, ptrdiff_t rs2, vuint64m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_u64m2x3(rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_f64m1x3_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_f64m1x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_f64m1x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_f64m2x3_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_f64m2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_f64m2x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_i64m1x3_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_i64m1x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_i64m1x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_i64m2x3_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_i64m2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_i64m2x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_u64m1x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_u64m1x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e64_v_u64m2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e64_v_u64m2x3_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg3e8.c b/auto-generated/gnu-api-tests/vssseg3e8.c
index e42938b62..62fa3e7a3 100644
--- a/auto-generated/gnu-api-tests/vssseg3e8.c
+++ b/auto-generated/gnu-api-tests/vssseg3e8.c
@@ -6,84 +6,84 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg3e8_v_i8mf8x3(int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_i8mf8x3(base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_i8mf8x3(int8_t *rs1, ptrdiff_t rs2, vint8mf8x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_i8mf8x3(rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_i8mf4x3(int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_i8mf4x3(base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_i8mf4x3(int8_t *rs1,
ptrdiff_t rs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_i8mf4x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf2x3(int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_i8mf2x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf2x3(int8_t *rs1, ptrdiff_t rs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_i8mf2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8m1x3(int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_i8m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8m1x3(int8_t *rs1, ptrdiff_t rs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_i8m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8m2x3(int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_i8m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8m2x3(int8_t *rs1, ptrdiff_t rs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_i8m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf8x3(uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_u8mf8x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf8x3(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_u8mf8x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf4x3(uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_u8mf4x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf4x3(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_u8mf4x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf2x3(uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_u8mf2x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf2x3(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_u8mf2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8m1x3(uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_u8m1x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8m1x3(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_u8m1x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8m2x3(uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_u8m2x3(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8m2x3(uint8_t *rs1, ptrdiff_t rs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_u8m2x3(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_i8mf8x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_i8mf8x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_i8mf4x3_m(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e8_v_i8mf4x3_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8_v_i8mf2x3_m(mask, base, bstride, v_tuple, vl); +void 
test_vssseg3e8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_i8mf2x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_i8m1x3_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_i8m1x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_i8m1x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_i8m2x3_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_i8m2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_i8m2x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_u8mf8x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_u8mf8x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_u8mf4x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_u8mf4x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_u8mf2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_u8mf2x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_u8m1x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_u8m1x3_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg3e8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) {
-  return __riscv_vssseg3e8_v_u8m2x3_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg3e8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e8_v_u8m2x3_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e8\.[ivxfswum.]+\s+} 20 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg4e16.c b/auto-generated/gnu-api-tests/vssseg4e16.c
index 5ef791c10..4ad8abcc1 100644
--- a/auto-generated/gnu-api-tests/vssseg4e16.c
+++ b/auto-generated/gnu-api-tests/vssseg4e16.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg4e16_v_f16mf4x4(float16_t *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16mf4x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16mf4x4(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16mf4x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_f16mf2x4(float16_t *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16mf2x4(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_f16m1x4(float16_t *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16m1x4(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_f16m2x4(float16_t *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16m2x4(float16_t *rs1, ptrdiff_t rs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16mf4x4(int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16mf4x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16mf4x4(int16_t *rs1, ptrdiff_t rs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16mf4x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16mf2x4(int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16mf2x4(int16_t *rs1, ptrdiff_t rs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16m1x4(int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16m1x4(int16_t *rs1, ptrdiff_t rs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16m2x4(int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16m2x4(int16_t *rs1, ptrdiff_t rs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16mf4x4(uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16mf4x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16mf4x4(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16mf4x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16mf2x4(uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16mf2x4(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16m1x4(uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16m1x4(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16m2x4(uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16m2x4(uint16_t *rs1, ptrdiff_t rs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16mf4x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16mf4x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_f16m1x4_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_f16m2x4_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_f16m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_f16m2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16mf4x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16mf4x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16m1x4_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_i16m2x4_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_i16m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_i16m2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16mf4x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16mf4x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e16_v_u16m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_u16m2x4_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e16\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg4e32.c b/auto-generated/gnu-api-tests/vssseg4e32.c
index 471f3a28a..265521265 100644
--- a/auto-generated/gnu-api-tests/vssseg4e32.c
+++ b/auto-generated/gnu-api-tests/vssseg4e32.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg4e32_v_f32mf2x4(float32_t *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_f32mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_f32mf2x4(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_f32mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_f32m1x4(float32_t *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_f32m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_f32m1x4(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_f32m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_f32m2x4(float32_t *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_f32m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_f32m2x4(float32_t *rs1, ptrdiff_t rs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_f32m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_i32mf2x4(int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_i32mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_i32mf2x4(int32_t *rs1, ptrdiff_t rs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_i32mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_i32m1x4(int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_i32m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_i32m1x4(int32_t *rs1, ptrdiff_t rs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_i32m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_i32m2x4(int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_i32m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_i32m2x4(int32_t *rs1, ptrdiff_t rs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_i32m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_u32mf2x4(uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_u32mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_u32mf2x4(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_u32mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_u32m1x4(uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_u32m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_u32m1x4(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_u32m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_u32m2x4(uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_u32m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_u32m2x4(uint32_t *rs1, ptrdiff_t rs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_u32m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_f32mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_f32mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_f32m1x4_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_f32m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_f32m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_f32m2x4_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_f32m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_f32m2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_i32mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_i32mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_i32m1x4_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_i32m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_i32m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_i32m2x4_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_i32m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_i32m2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_u32mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_u32mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_u32m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_u32m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e32_v_u32m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e32_v_u32m2x4_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e32\.[ivxfswum.]+\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg4e64.c b/auto-generated/gnu-api-tests/vssseg4e64.c
index 50c9d7c92..d1e8fb558 100644
--- a/auto-generated/gnu-api-tests/vssseg4e64.c
+++ b/auto-generated/gnu-api-tests/vssseg4e64.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg4e64_v_f64m1x4(float64_t *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_f64m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_f64m1x4(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_f64m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_f64m2x4(float64_t *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_f64m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_f64m2x4(float64_t *rs1, ptrdiff_t rs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_f64m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_i64m1x4(int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_i64m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_i64m1x4(int64_t *rs1, ptrdiff_t rs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_i64m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_i64m2x4(int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_i64m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_i64m2x4(int64_t *rs1, ptrdiff_t rs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_i64m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_u64m1x4(uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_u64m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_u64m1x4(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_u64m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_u64m2x4(uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_u64m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_u64m2x4(uint64_t *rs1, ptrdiff_t rs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_u64m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_f64m1x4_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_f64m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_f64m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_f64m2x4_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_f64m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_f64m2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_i64m1x4_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_i64m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_i64m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_i64m2x4_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_i64m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_i64m2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_u64m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_u64m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e64_v_u64m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e64_v_u64m2x4_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg4e8.c b/auto-generated/gnu-api-tests/vssseg4e8.c
index f7434c196..dbd6a61ee 100644
--- a/auto-generated/gnu-api-tests/vssseg4e8.c
+++ b/auto-generated/gnu-api-tests/vssseg4e8.c
@@ -6,84 +6,84 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg4e8_v_i8mf8x4(int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf8x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8mf8x4(int8_t *rs1, ptrdiff_t rs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf8x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8mf4x4(int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf4x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8mf4x4(int8_t *rs1, ptrdiff_t rs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf4x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8mf2x4(int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8mf2x4(int8_t *rs1, ptrdiff_t rs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8m1x4(int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8m1x4(int8_t *rs1, ptrdiff_t rs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8m2x4(int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8m2x4(int8_t *rs1, ptrdiff_t rs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8mf8x4(uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf8x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8mf8x4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf8x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8mf4x4(uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf4x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8mf4x4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf4x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8mf2x4(uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8mf2x4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8m1x4(uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m1x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8m1x4(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8m1x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8m2x4(uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m2x4(base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8m2x4(uint8_t *rs1, ptrdiff_t rs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8m2x4(rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf8x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf8x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf4x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf4x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8m1x4_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_i8m2x4_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_i8m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_i8m2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf8x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf8x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf4x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf4x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8mf2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8mf2x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m1x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8m1x4_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg4e8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vssseg4e8_v_u8m2x4_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg4e8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e8_v_u8m2x4_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e8\.[ivxfswum.]+\s+} 20 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg5e16.c b/auto-generated/gnu-api-tests/vssseg5e16.c
index 447d563bb..d610a0b3e 100644
--- a/auto-generated/gnu-api-tests/vssseg5e16.c
+++ b/auto-generated/gnu-api-tests/vssseg5e16.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg5e16_v_f16mf4x5(float16_t *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_f16mf4x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_f16mf4x5(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_f16mf4x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_f16mf2x5(float16_t *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_f16mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_f16mf2x5(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_f16mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_f16m1x5(float16_t *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_f16m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_f16m1x5(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_f16m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_i16mf4x5(int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_i16mf4x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_i16mf4x5(int16_t *rs1, ptrdiff_t rs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_i16mf4x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_i16mf2x5(int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_i16mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_i16mf2x5(int16_t *rs1, ptrdiff_t rs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_i16mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_i16m1x5(int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_i16m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_i16m1x5(int16_t *rs1, ptrdiff_t rs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_i16m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_u16mf4x5(uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_u16mf4x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_u16mf4x5(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_u16mf4x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_u16mf2x5(uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_u16mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_u16mf2x5(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_u16mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_u16m1x5(uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_u16m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_u16m1x5(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_u16m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_f16mf4x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_f16mf4x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_f16mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_f16mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_f16m1x5_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_f16m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_f16m1x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_i16mf4x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_i16mf4x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_i16mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_i16mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_i16m1x5_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_i16m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_i16m1x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_u16mf4x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_u16mf4x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_u16mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_u16mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e16_v_u16m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_u16m1x5_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e16\.[ivxfswum.]+\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg5e32.c b/auto-generated/gnu-api-tests/vssseg5e32.c
index 11a1220b3..d29a00c38 100644
--- a/auto-generated/gnu-api-tests/vssseg5e32.c
+++ b/auto-generated/gnu-api-tests/vssseg5e32.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg5e32_v_f32mf2x5(float32_t *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_f32mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_f32mf2x5(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_f32mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_f32m1x5(float32_t *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_f32m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_f32m1x5(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_f32m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_i32mf2x5(int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_i32mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_i32mf2x5(int32_t *rs1, ptrdiff_t rs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_i32mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_i32m1x5(int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_i32m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_i32m1x5(int32_t *rs1, ptrdiff_t rs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_i32m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_u32mf2x5(uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_u32mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_u32mf2x5(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_u32mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_u32m1x5(uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_u32m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_u32m1x5(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_u32m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_f32mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_f32mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_f32m1x5_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_f32m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_f32m1x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_i32mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_i32mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_i32m1x5_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_i32m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_i32m1x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_u32mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_u32mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e32_v_u32m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e32_v_u32m1x5_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e32\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg5e64.c b/auto-generated/gnu-api-tests/vssseg5e64.c
index 1f8c7a2ba..0867839ee 100644
--- a/auto-generated/gnu-api-tests/vssseg5e64.c
+++ b/auto-generated/gnu-api-tests/vssseg5e64.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg5e64_v_f64m1x5(float64_t *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e64_v_f64m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e64_v_f64m1x5(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e64_v_f64m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e64_v_i64m1x5(int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e64_v_i64m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e64_v_i64m1x5(int64_t *rs1, ptrdiff_t rs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e64_v_i64m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e64_v_u64m1x5(uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e64_v_u64m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e64_v_u64m1x5(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e64_v_u64m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e64_v_f64m1x5_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e64_v_f64m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e64_v_f64m1x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e64_v_i64m1x5_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e64_v_i64m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e64_v_i64m1x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e64_v_u64m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e64_v_u64m1x5_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e64\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg5e8.c b/auto-generated/gnu-api-tests/vssseg5e8.c
index 2d65e4ba9..b18165333 100644
--- a/auto-generated/gnu-api-tests/vssseg5e8.c
+++ b/auto-generated/gnu-api-tests/vssseg5e8.c
@@ -6,68 +6,68 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg5e8_v_i8mf8x5(int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8mf8x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8mf8x5(int8_t *rs1, ptrdiff_t rs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8mf8x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_i8mf4x5(int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8mf4x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8mf4x5(int8_t *rs1, ptrdiff_t rs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8mf4x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_i8mf2x5(int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8mf2x5(int8_t *rs1, ptrdiff_t rs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_i8m1x5(int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8m1x5(int8_t *rs1, ptrdiff_t rs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8mf8x5(uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8mf8x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8mf8x5(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8mf8x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8mf4x5(uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8mf4x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8mf4x5(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8mf4x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8mf2x5(uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8mf2x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8mf2x5(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8mf2x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8m1x5(uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8m1x5(base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8m1x5(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8m1x5(rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8mf8x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8mf8x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8mf4x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8mf4x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_i8m1x5_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_i8m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_i8m1x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8mf8x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8mf8x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8mf4x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8mf4x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8mf2x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8mf2x5_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg5e8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vssseg5e8_v_u8m1x5_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg5e8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e8_v_u8m1x5_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e8\.[ivxfswum.]+\s+} 16 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg6e16.c b/auto-generated/gnu-api-tests/vssseg6e16.c
index 9ce795b56..af0107876 100644
--- a/auto-generated/gnu-api-tests/vssseg6e16.c
+++ b/auto-generated/gnu-api-tests/vssseg6e16.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg6e16_v_f16mf4x6(float16_t *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_f16mf4x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_f16mf4x6(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_f16mf4x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_f16mf2x6(float16_t *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_f16mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_f16mf2x6(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_f16mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_f16m1x6(float16_t *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_f16m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_f16m1x6(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_f16m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_i16mf4x6(int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_i16mf4x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_i16mf4x6(int16_t *rs1, ptrdiff_t rs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_i16mf4x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_i16mf2x6(int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_i16mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_i16mf2x6(int16_t *rs1, ptrdiff_t rs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_i16mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_i16m1x6(int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_i16m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_i16m1x6(int16_t *rs1, ptrdiff_t rs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_i16m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_u16mf4x6(uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_u16mf4x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_u16mf4x6(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_u16mf4x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_u16mf2x6(uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_u16mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_u16mf2x6(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_u16mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_u16m1x6(uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_u16m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_u16m1x6(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_u16m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_f16mf4x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_f16mf4x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_f16mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_f16mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_f16m1x6_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_f16m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_f16m1x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_i16mf4x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_i16mf4x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_i16mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_i16mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_i16m1x6_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_i16m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_i16m1x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_u16mf4x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_u16mf4x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_u16mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_u16mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e16_v_u16m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_u16m1x6_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e16\.[ivxfswum.]+\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg6e32.c b/auto-generated/gnu-api-tests/vssseg6e32.c
index 155f40610..5a66618e7 100644
--- a/auto-generated/gnu-api-tests/vssseg6e32.c
+++ b/auto-generated/gnu-api-tests/vssseg6e32.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg6e32_v_f32mf2x6(float32_t *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_f32mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_f32mf2x6(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_f32mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_f32m1x6(float32_t *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_f32m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_f32m1x6(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_f32m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_i32mf2x6(int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_i32mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_i32mf2x6(int32_t *rs1, ptrdiff_t rs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_i32mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_i32m1x6(int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_i32m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_i32m1x6(int32_t *rs1, ptrdiff_t rs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_i32m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_u32mf2x6(uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_u32mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_u32mf2x6(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_u32mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_u32m1x6(uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_u32m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_u32m1x6(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_u32m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_f32mf2x6_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_f32mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_f32mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_f32m1x6_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_f32m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_f32m1x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_i32mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_i32mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_i32m1x6_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_i32m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_i32m1x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_u32mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_u32mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e32_v_u32m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e32_v_u32m1x6_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e32\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg6e64.c b/auto-generated/gnu-api-tests/vssseg6e64.c
index 054021f96..7f01bfd81 100644
--- a/auto-generated/gnu-api-tests/vssseg6e64.c
+++ b/auto-generated/gnu-api-tests/vssseg6e64.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg6e64_v_f64m1x6(float64_t *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e64_v_f64m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e64_v_f64m1x6(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e64_v_f64m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e64_v_i64m1x6(int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e64_v_i64m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e64_v_i64m1x6(int64_t *rs1, ptrdiff_t rs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e64_v_i64m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e64_v_u64m1x6(uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e64_v_u64m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e64_v_u64m1x6(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e64_v_u64m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e64_v_f64m1x6_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e64_v_f64m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e64_v_f64m1x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e64_v_i64m1x6_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e64_v_i64m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e64_v_i64m1x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e64_v_u64m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e64_v_u64m1x6_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e64\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg6e8.c b/auto-generated/gnu-api-tests/vssseg6e8.c
index 5a04852ff..9e92305c1 100644
--- a/auto-generated/gnu-api-tests/vssseg6e8.c
+++ b/auto-generated/gnu-api-tests/vssseg6e8.c
@@ -6,68 +6,68 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg6e8_v_i8mf8x6(int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8mf8x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8mf8x6(int8_t *rs1, ptrdiff_t rs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8mf8x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_i8mf4x6(int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8mf4x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8mf4x6(int8_t *rs1, ptrdiff_t rs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8mf4x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_i8mf2x6(int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8mf2x6(int8_t *rs1, ptrdiff_t rs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_i8m1x6(int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8m1x6(int8_t *rs1, ptrdiff_t rs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8mf8x6(uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8mf8x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8mf8x6(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8mf8x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8mf4x6(uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8mf4x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8mf4x6(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8mf4x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8mf2x6(uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8mf2x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8mf2x6(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8mf2x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8m1x6(uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8m1x6(base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8m1x6(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8m1x6(rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8mf8x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8mf8x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8mf4x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8mf4x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_i8m1x6_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_i8m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_i8m1x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8mf8x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8mf8x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8mf4x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8mf4x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8mf2x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8mf2x6_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg6e8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vssseg6e8_v_u8m1x6_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg6e8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e8_v_u8m1x6_m(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e8\.[ivxfswum.]+\s+} 16 } } */
diff --git a/auto-generated/gnu-api-tests/vssseg7e16.c b/auto-generated/gnu-api-tests/vssseg7e16.c
index 36053a56f..89588b08a 100644
--- a/auto-generated/gnu-api-tests/vssseg7e16.c
+++ b/auto-generated/gnu-api-tests/vssseg7e16.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vssseg7e16_v_f16mf4x7(float16_t *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_f16mf4x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_f16mf4x7(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_f16mf4x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_f16mf2x7(float16_t *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_f16mf2x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_f16mf2x7(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_f16mf2x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_f16m1x7(float16_t *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_f16m1x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_f16m1x7(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_f16m1x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_i16mf4x7(int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_i16mf4x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_i16mf4x7(int16_t *rs1, ptrdiff_t rs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_i16mf4x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_i16mf2x7(int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_i16mf2x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_i16mf2x7(int16_t *rs1, ptrdiff_t rs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_i16mf2x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_i16m1x7(int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_i16m1x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_i16m1x7(int16_t *rs1, ptrdiff_t rs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_i16m1x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_u16mf4x7(uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_u16mf4x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_u16mf4x7(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_u16mf4x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_u16mf2x7(uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_u16mf2x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_u16mf2x7(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_u16mf2x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_u16m1x7(uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_u16m1x7(base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_u16m1x7(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_u16m1x7(rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_f16mf4x7_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_f16mf4x7_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vssseg7e16_v_f16mf2x7_m(mask, base, bstride, v_tuple, vl);
+void test_vssseg7e16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_f16mf2x7_m(vm, rs1, rs2, vs3, vl);
 }

-void test_vssseg7e16_v_f16m1x7_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl)
{ - return __riscv_vssseg7e16_v_f16m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16_v_f16m1x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16_v_i16mf4x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16_v_i16mf4x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16_v_i16mf2x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16_v_i16mf2x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16m1x7_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16_v_i16m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16_v_i16m1x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16_v_u16mf4x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16_v_u16mf4x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16_v_u16mf2x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16_v_u16mf2x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16_v_u16m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16_v_u16m1x7_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg7e32.c b/auto-generated/gnu-api-tests/vssseg7e32.c index cdad94356..500a548b6 100644 --- a/auto-generated/gnu-api-tests/vssseg7e32.c +++ b/auto-generated/gnu-api-tests/vssseg7e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg7e32_v_f32mf2x7(float32_t *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_f32mf2x7(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32mf2x7(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_f32mf2x7(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_f32m1x7(float32_t *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_f32m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32m1x7(float32_t *rs1, ptrdiff_t rs2, 
vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_f32m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32mf2x7(int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_i32mf2x7(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_i32mf2x7(int32_t *rs1, ptrdiff_t rs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_i32mf2x7(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32m1x7(int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_i32m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_i32m1x7(int32_t *rs1, ptrdiff_t rs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_i32m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32mf2x7(uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_u32mf2x7(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32mf2x7(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_u32mf2x7(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32m1x7(uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_u32m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32m1x7(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_u32m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_f32mf2x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_f32mf2x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_f32m1x7_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_f32m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_f32m1x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_i32mf2x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_i32mf2x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32m1x7_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_i32m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_i32m1x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_u32mf2x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_u32mf2x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32_v_u32m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, 
vuint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32_v_u32m1x7_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg7e64.c b/auto-generated/gnu-api-tests/vssseg7e64.c index 54fc01f60..e7d67d92a 100644 --- a/auto-generated/gnu-api-tests/vssseg7e64.c +++ b/auto-generated/gnu-api-tests/vssseg7e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg7e64_v_f64m1x7(float64_t *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64_v_f64m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e64_v_f64m1x7(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64_v_f64m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_i64m1x7(int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64_v_i64m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e64_v_i64m1x7(int64_t *rs1, ptrdiff_t rs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64_v_i64m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_u64m1x7(uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64_v_u64m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e64_v_u64m1x7(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64_v_u64m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_f64m1x7_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64_v_f64m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64_v_f64m1x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_i64m1x7_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64_v_i64m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64_v_i64m1x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64_v_u64m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64_v_u64m1x7_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg7e8.c b/auto-generated/gnu-api-tests/vssseg7e8.c index ecc6eeaee..e86b41cb6 100644 --- a/auto-generated/gnu-api-tests/vssseg7e8.c +++ b/auto-generated/gnu-api-tests/vssseg7e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg7e8_v_i8mf8x7(int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_i8mf8x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf8x7(int8_t *rs1, ptrdiff_t rs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8mf8x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf4x7(int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t 
vl) { - return __riscv_vssseg7e8_v_i8mf4x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf4x7(int8_t *rs1, ptrdiff_t rs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8mf4x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf2x7(int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_i8mf2x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf2x7(int8_t *rs1, ptrdiff_t rs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8mf2x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8m1x7(int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_i8m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8m1x7(int8_t *rs1, ptrdiff_t rs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf8x7(uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8mf8x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf8x7(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8mf8x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf4x7(uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8mf4x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf4x7(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8mf4x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf2x7(uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8mf2x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf2x7(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8mf2x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8m1x7(uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8m1x7(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8m1x7(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8m1x7(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_i8mf8x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8mf8x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_i8mf4x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8mf4x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_i8mf2x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8mf2x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8m1x7_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_i8m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_i8m1x7_m(vm, rs1, rs2, vs3, vl); } -void 
test_vssseg7e8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8mf8x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8mf8x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8mf4x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8mf4x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8mf2x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8mf2x7_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8_v_u8m1x7_m(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8_v_u8m1x7_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg8e16.c b/auto-generated/gnu-api-tests/vssseg8e16.c index ef6173c5f..50b219689 100644 --- a/auto-generated/gnu-api-tests/vssseg8e16.c +++ b/auto-generated/gnu-api-tests/vssseg8e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e16_v_f16mf4x8(float16_t *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_f16mf4x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf4x8(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_f16mf4x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16mf2x8(float16_t *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_f16mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf2x8(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_f16mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16m1x8(float16_t *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_f16m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16m1x8(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_f16m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16mf4x8(int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_i16mf4x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf4x8(int16_t *rs1, ptrdiff_t rs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_i16mf4x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16mf2x8(int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_i16mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf2x8(int16_t *rs1, ptrdiff_t rs2, vint16mf2x8_t vs3, size_t vl) { + return 
__riscv_vssseg8e16_v_i16mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16m1x8(int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_i16m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16m1x8(int16_t *rs1, ptrdiff_t rs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_i16m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf4x8(uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_u16mf4x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf4x8(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_u16mf4x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf2x8(uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_u16mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf2x8(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_u16mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16m1x8(uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_u16m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16m1x8(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_u16m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_f16mf4x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_f16mf4x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_f16mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_f16mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16m1x8_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_f16m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_f16m1x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_i16mf4x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_i16mf4x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_i16mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_i16mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16m1x8_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_i16m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x8_t vs3, size_t vl) { + return 
__riscv_vssseg8e16_v_i16m1x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_u16mf4x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_u16mf4x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_u16mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_u16mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16_v_u16m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16_v_u16m1x8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg8e32.c b/auto-generated/gnu-api-tests/vssseg8e32.c index 05b54ca5b..ffd16b1f6 100644 --- a/auto-generated/gnu-api-tests/vssseg8e32.c +++ b/auto-generated/gnu-api-tests/vssseg8e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e32_v_f32mf2x8(float32_t *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_f32mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32mf2x8(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_f32mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_f32m1x8(float32_t *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_f32m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32m1x8(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_f32m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32mf2x8(int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_i32mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32mf2x8(int32_t *rs1, ptrdiff_t rs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_i32mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32m1x8(int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_i32m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32m1x8(int32_t *rs1, ptrdiff_t rs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_i32m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32mf2x8(uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_u32mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32mf2x8(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_u32mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32m1x8(uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_u32m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32m1x8(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x8_t 
vs3, size_t vl) { + return __riscv_vssseg8e32_v_u32m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_f32mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_f32mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_f32m1x8_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_f32m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_f32m1x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_i32mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_i32mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32m1x8_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_i32m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_i32m1x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_u32mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_u32mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32_v_u32m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32_v_u32m1x8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg8e64.c b/auto-generated/gnu-api-tests/vssseg8e64.c index a943c6574..b6bd68799 100644 --- a/auto-generated/gnu-api-tests/vssseg8e64.c +++ b/auto-generated/gnu-api-tests/vssseg8e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e64_v_f64m1x8(float64_t *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64_v_f64m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e64_v_f64m1x8(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64_v_f64m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_i64m1x8(int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64_v_i64m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e64_v_i64m1x8(int64_t *rs1, ptrdiff_t rs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64_v_i64m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_u64m1x8(uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t 
vl) { - return __riscv_vssseg8e64_v_u64m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e64_v_u64m1x8(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64_v_u64m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_f64m1x8_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64_v_f64m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64_v_f64m1x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_i64m1x8_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64_v_i64m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64_v_i64m1x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64_v_u64m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64_v_u64m1x8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-api-tests/vssseg8e8.c b/auto-generated/gnu-api-tests/vssseg8e8.c index 806538dbf..afafead9d 100644 --- a/auto-generated/gnu-api-tests/vssseg8e8.c +++ b/auto-generated/gnu-api-tests/vssseg8e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e8_v_i8mf8x8(int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8mf8x8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf8x8(int8_t *rs1, ptrdiff_t rs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8mf8x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf4x8(int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8mf4x8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf4x8(int8_t *rs1, ptrdiff_t rs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8mf4x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf2x8(int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf2x8(int8_t *rs1, ptrdiff_t rs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8m1x8(int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8m1x8(int8_t *rs1, ptrdiff_t rs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf8x8(uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8mf8x8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf8x8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8mf8x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf4x8(uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8mf4x8(base, bstride, v_tuple, vl); 
+void test_vssseg8e8_v_u8mf4x8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8mf4x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf2x8(uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8mf2x8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf2x8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8mf2x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8m1x8(uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8m1x8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8m1x8(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8m1x8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8mf8x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8mf8x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8mf4x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8mf4x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8m1x8_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_i8m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_i8m1x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8mf8x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8mf8x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8mf4x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8mf4x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8mf2x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8mf2x8_m(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8_v_u8m1x8_m(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8m1x8_m(vbool8_t vm, 
uint8_t *rs1, ptrdiff_t rs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8_v_u8m1x8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-api-tests/vssub.c b/auto-generated/gnu-api-tests/vssub.c index 2dbbeb62b..55f4f6b97 100644 --- a/auto-generated/gnu-api-tests/vssub.c +++ b/auto-generated/gnu-api-tests/vssub.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1(op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1(op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2(op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2(op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4(op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4(op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t 
vl) { - return __riscv_vssub_vv_i8m8(op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8(op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1(op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1(op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2(op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2(op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4(op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4(op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8(op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8(op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2(op1, op2, vl); +vint32mf2_t 
test_vssub_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1(op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1(op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2(op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2(op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4(op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4(op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8(op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8(op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1(op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1(op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2(op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2(op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4(op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return 
__riscv_vssub_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4(op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8(op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8(op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m8(vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf8_m(vm, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_m(mask, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf8_m(vm, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf4_m(vm, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_m(mask, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf4_m(vm, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf2_m(vm, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_m(mask, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf2_m(vm, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m1_m(vm, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m1_m(vm, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m2_m(vm, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t 
test_vssub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i8m2_m(vm, vs2, rs1, vl);
 }
-vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vssub_vv_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vssub_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i8m4_m(vm, vs2, vs1, vl);
 }
-vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_vx_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vssub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i8m4_m(vm, vs2, rs1, vl);
 }
-vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vssub_vv_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vssub_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i8m8_m(vm, vs2, vs1, vl);
 }
-vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_vx_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vssub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i8m8_m(vm, vs2, rs1, vl);
 }
-vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vssub_vv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i16mf4_m(vm, vs2, vs1, vl);
 }
-vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_vx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i16mf4_m(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vssub_vv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i16mf2_m(vm, vs2, vs1, vl);
 }
-vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_vx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i16mf2_m(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vssub_vv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vssub_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i16m1_m(vm, vs2, vs1, vl);
 }
-vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vssub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i16m1_m(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vssub_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vssub_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i16m2_m(vm, vs2, vs1, vl);
 }
-vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vssub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i16m2_m(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vssub_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vssub_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i16m4_m(vm, vs2, vs1, vl);
 }
-vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vssub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i16m4_m(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vssub_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vssub_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i16m8_m(vm, vs2, vs1, vl);
 }
-vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vssub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i16m8_m(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vssub_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i32mf2_m(vm, vs2, vs1, vl);
 }
-vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vssub_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i32mf2_m(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vssub_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vssub_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i32m1_m(vm, vs2, vs1, vl);
 }
-vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vssub_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vssub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i32m1_m(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vssub_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vssub_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i32m2_m(vm, vs2, vs1, vl);
 }
-vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vssub_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vssub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i32m2_m(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vssub_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vssub_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i32m4_m(vm, vs2, vs1, vl);
 }
-vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vssub_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vssub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vssub_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vssub_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vssub_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vssub_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vssub_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vssub_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vssub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vssub_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vssub_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vssub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vssub_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vssub_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vssub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vssub_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vssub_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vssub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssub\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vssubu.c b/auto-generated/gnu-api-tests/vssubu.c
index eb14cfc48..ae38216bc 100644
--- a/auto-generated/gnu-api-tests/vssubu.c
+++ b/auto-generated/gnu-api-tests/vssubu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf8(vs2, vs1, vl);
 }
-vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8mf8(vs2, rs1, vl);
 }
-vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf4(vs2, vs1, vl);
 }
-vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8mf4(vs2, rs1, vl);
 }
-vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf2(vs2, vs1, vl);
 }
-vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8mf2(vs2, rs1, vl);
 }
-vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m1(op1, op2, vl);
+vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m1(vs2, vs1, vl);
 }
-vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m1(op1, op2, vl);
+vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m1(vs2, rs1, vl);
 }
-vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m2(op1, op2, vl);
+vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m2(vs2, vs1, vl);
 }
-vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m2(op1, op2, vl);
+vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m2(vs2, rs1, vl);
 }
-vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m4(op1, op2, vl);
+vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m4(vs2, vs1, vl);
 }
-vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m4(op1, op2, vl);
+vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m4(vs2, rs1, vl);
 }
-vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m8(op1, op2, vl);
+vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m8(vs2, vs1, vl);
 }
-vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m8(op1, op2, vl);
+vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m8(vs2, rs1, vl);
 }
-vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16mf4(vs2, vs1, vl);
 }
-vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16mf4(vs2, rs1, vl);
 }
-vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16mf2(vs2, vs1, vl);
 }
-vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16mf2(vs2, rs1, vl);
 }
-vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m1(op1, op2, vl);
+vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m1(vs2, vs1, vl);
 }
-vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m1(op1, op2, vl);
+vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m1(vs2, rs1, vl);
 }
-vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m2(op1, op2, vl);
+vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m2(vs2, vs1, vl);
 }
-vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m2(op1, op2, vl);
+vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m2(vs2, rs1, vl);
 }
-vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m4(op1, op2, vl);
+vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m4(vs2, vs1, vl);
 }
-vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m4(op1, op2, vl);
+vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m4(vs2, rs1, vl);
 }
-vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m8(op1, op2, vl);
+vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m8(vs2, vs1, vl);
 }
-vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m8(op1, op2, vl);
+vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m8(vs2, rs1, vl);
 }
-vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32mf2(vs2, vs1, vl);
 }
-vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32mf2(vs2, rs1, vl);
 }
-vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m1(op1, op2, vl);
+vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m1(vs2, vs1, vl);
 }
-vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m1(op1, op2, vl);
+vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m1(vs2, rs1, vl);
 }
-vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m2(op1, op2, vl);
+vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m2(vs2, vs1, vl);
 }
-vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m2(op1, op2, vl);
+vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m2(vs2, rs1, vl);
 }
-vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m4(op1, op2, vl);
+vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m4(vs2, vs1, vl);
 }
-vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m4(op1, op2, vl);
+vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m4(vs2, rs1, vl);
 }
-vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m8(op1, op2, vl);
+vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m8(vs2, vs1, vl);
 }
-vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m8(op1, op2, vl);
+vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m8(vs2, rs1, vl);
 }
-vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m1(op1, op2, vl);
+vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m1(vs2, vs1, vl);
 }
-vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m1(op1, op2, vl);
+vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m1(vs2, rs1, vl);
 }
-vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m2(op1, op2, vl);
+vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m2(vs2, vs1, vl);
 }
-vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m2(op1, op2, vl);
+vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m2(vs2, rs1, vl);
 }
-vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m4(op1, op2, vl);
+vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m4(vs2, vs1, vl);
 }
-vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m4(op1, op2, vl);
+vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m4(vs2, rs1, vl);
 }
-vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m8(op1, op2, vl);
+vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m8(vs2, vs1, vl);
 }
-vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m8(op1, op2, vl);
+vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m8(vs2, rs1, vl);
 }
-vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf8_m(vm, vs2, vs1, vl);
 }
-vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8mf8_m(vm, vs2, rs1, vl);
 }
-vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf4_m(vm, vs2, vs1, vl);
 }
-vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8mf4_m(vm, vs2, rs1, vl);
 }
-vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf2_m(vm, vs2, vs1, vl);
 }
-vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8mf2_m(vm, vs2, rs1, vl);
 }
-vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m1_m(vm, vs2, vs1, vl);
 }
-vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m1_m(vm, vs2, rs1, vl);
 }
-vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m2_m(vm, vs2, vs1, vl);
 }
-vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m2_m(vm, vs2, rs1, vl);
 }
-vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m4_m(vm, vs2, vs1, vl);
 }
-vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m4_m(vm, vs2, rs1, vl);
 }
-vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8m8_m(vm, vs2, vs1, vl);
 }
-vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8m8_m(vm, vs2, rs1, vl);
 }
-vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16mf4_m(vm, vs2, vs1, vl);
 }
-vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16mf4_m(vm, vs2, rs1, vl);
 }
-vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16mf2_m(vm, vs2, vs1, vl);
 }
-vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16mf2_m(vm, vs2, rs1, vl);
 }
-vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m1_m(vm, vs2, vs1, vl);
 }
-vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m1_m(vm, vs2, rs1, vl);
 }
-vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m2_m(vm, vs2, vs1, vl);
 }
-vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m2_m(vm, vs2, rs1, vl);
 }
-vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m4_m(vm, vs2, vs1, vl);
 }
-vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m4_m(vm, vs2, rs1, vl);
 }
-vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m8_m(vm, vs2, vs1, vl);
 }
-vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m8_m(vm, vs2, rs1, vl);
 }
-vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32mf2_m(vm, vs2, vs1, vl);
 }
-vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32mf2_m(vm, vs2, rs1, vl);
 }
-vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m1_m(vm, vs2, vs1, vl);
 }
-vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m1_m(vm, vs2, rs1, vl);
 }
-vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m2_m(vm, vs2, vs1, vl);
 }
-vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m2_m(vm, vs2, rs1, vl);
 }
-vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m4_m(vm, vs2, vs1, vl);
 }
-vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m4_m(vm, vs2, rs1, vl);
 }
-vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m8_m(vm, vs2, vs1, vl);
 }
-vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m8_m(vm, vs2, rs1, vl);
 }
-vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m1_m(vm, vs2, vs1, vl);
 }
-vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m1_m(vm, vs2, rs1, vl);
 }
-vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m2_m(vm, vs2, vs1, vl);
 }
-vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m2_m(vm, vs2, rs1, vl);
 }
-vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m4_m(vm, vs2, vs1, vl);
 }
-vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m4_m(vm, vs2, rs1, vl);
 }
-vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m8_m(vm, vs2, vs1, vl);
 }
-vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssubu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vsub.c b/auto-generated/gnu-api-tests/vsub.c
index 7edca714a..ed3bd02fd 100644
--- a/auto-generated/gnu-api-tests/vsub.c
+++ b/auto-generated/gnu-api-tests/vsub.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf8(op1, op2, vl);
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf8(vs2, vs1, vl);
 }
-vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf8(op1, op2, vl);
+vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf8(vs2, rs1, vl);
 }
-vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf4(op1, op2, vl);
+vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf4(vs2, vs1, vl);
 }
-vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf4(op1, op2, vl);
+vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf4(vs2, rs1, vl);
 }
-vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf2(op1, op2, vl);
+vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf2(vs2, vs1, vl);
 }
-vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf2(op1, op2, vl);
+vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf2(vs2, rs1, vl);
 }
-vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m1(op1, op2, vl);
+vint8m1_t test_vsub_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m1(vs2, vs1, vl);
 }
-vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m1(op1, op2, vl);
+vint8m1_t test_vsub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m1(vs2, rs1, vl);
 }
-vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m2(op1, op2, vl);
+vint8m2_t test_vsub_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m2(vs2, vs1, vl);
 }
-vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m2(op1, op2, vl);
+vint8m2_t test_vsub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m2(vs2, rs1, vl);
 }
-vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m4(op1, op2, vl);
+vint8m4_t test_vsub_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m4(vs2, vs1, vl);
 }
-vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m4(op1, op2, vl);
+vint8m4_t test_vsub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m4(vs2, rs1, vl);
 }
-vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m8(op1, op2, vl);
+vint8m8_t test_vsub_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m8(vs2, vs1, vl);
 }
-vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m8(op1, op2, vl);
+vint8m8_t test_vsub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m8(vs2, rs1, vl);
 }
-vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf4(op1, op2, vl);
+vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf4(vs2, vs1, vl);
 }
-vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf4(op1, op2, vl);
+vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf4(vs2, rs1, vl);
 }
-vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf2(op1, op2, vl);
+vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf2(vs2, vs1, vl);
 }
-vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf2(op1, op2, vl);
+vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf2(vs2, rs1, vl);
 }
-vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m1(op1, op2, vl);
+vint16m1_t test_vsub_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m1(vs2, vs1, vl);
 }
-vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m1(op1, op2, vl);
+vint16m1_t test_vsub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m1(vs2, rs1, vl);
 }
-vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m2(op1, op2, vl);
+vint16m2_t test_vsub_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m2(vs2, vs1, vl);
 }
-vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m2(op1, op2, vl);
+vint16m2_t test_vsub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m2(vs2, rs1, vl);
 }
-vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m4(op1, op2, vl);
+vint16m4_t test_vsub_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m4(vs2, vs1, vl);
 }
-vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m4(op1, op2, vl);
+vint16m4_t test_vsub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m4(vs2, rs1, vl);
 }
-vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m8(op1, op2, vl);
+vint16m8_t test_vsub_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m8(vs2, vs1, vl);
 }
-vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m8(op1, op2, vl);
+vint16m8_t test_vsub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m8(vs2, rs1, vl);
 }
-vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32mf2(op1, op2, vl);
+vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32mf2(vs2, vs1, vl);
 }
-vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32mf2(op1, op2, vl);
+vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32mf2(vs2, rs1, vl);
 }
-vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m1(op1, op2, vl);
+vint32m1_t test_vsub_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m1(vs2, vs1, vl);
 }
-vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m1(op1, op2, vl);
+vint32m1_t test_vsub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m1(vs2, rs1, vl);
 }
-vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m2(op1, op2, vl);
+vint32m2_t test_vsub_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m2(vs2, vs1, vl);
 }
-vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m2(op1, op2, vl);
+vint32m2_t test_vsub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m2(vs2, rs1, vl);
 }
-vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m4(op1, op2, vl);
+vint32m4_t test_vsub_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m4(vs2, vs1, vl);
 }
-vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m4(op1, op2, vl);
+vint32m4_t test_vsub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m4(vs2, rs1, vl);
 }
-vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m8(op1, op2, vl);
+vint32m8_t test_vsub_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m8(vs2, vs1, vl);
 }
-vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m8(op1, op2, vl);
+vint32m8_t test_vsub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m8(vs2, rs1, vl);
 }
-vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m1(op1, op2, vl);
+vint64m1_t test_vsub_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m1(vs2, vs1, vl);
 }
-vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m1(op1, op2, vl);
+vint64m1_t test_vsub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m1(vs2, rs1, vl);
 }
-vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m2(op1, op2, vl);
+vint64m2_t test_vsub_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m2(vs2, vs1, vl);
 }
-vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m2(op1, op2, vl);
+vint64m2_t test_vsub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m2(vs2, rs1, vl);
 }
-vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m4(op1, op2, vl);
+vint64m4_t test_vsub_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m4(vs2, vs1, vl);
 }
-vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m4(op1, op2, vl);
+vint64m4_t test_vsub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m4(vs2, rs1, vl);
 }
-vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m8(op1, op2, vl);
+vint64m8_t test_vsub_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m8(vs2, vs1, vl);
 }
-vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m8(op1, op2, vl);
+vint64m8_t test_vsub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m8(vs2, rs1, vl);
 }
-vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf8(vs2, vs1, vl);
 }
-vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf8(vs2, rs1, vl);
 }
-vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf4(vs2, vs1, vl);
 }
-vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf4(vs2, rs1, vl);
 }
-vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf2(vs2, vs1, vl);
 }
-vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf2(vs2, rs1, vl);
 }
-vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m1(op1, op2, vl);
+vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m1(vs2, vs1, vl);
 }
-vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m1(op1, op2, vl);
+vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m1(vs2, rs1, vl);
 }
-vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m2(op1, op2, vl);
+vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m2(vs2, vs1, vl);
 }
-vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m2(op1, op2, vl);
+vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m2(vs2, rs1, vl);
 }
-vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m4(op1, op2, vl);
+vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m4(vs2, vs1, vl);
 }
-vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m4(op1, op2, vl);
+vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m4(vs2, rs1, vl);
 }
-vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m8(op1, op2, vl);
+vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m8(vs2, vs1, vl);
 }
-vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m8(op1, op2, vl);
+vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m8(vs2, rs1, vl);
 }
-vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16mf4(vs2, vs1, vl);
 }
-vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16mf4(vs2, rs1, vl);
 }
-vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16mf2(vs2, vs1, vl);
 }
-vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16mf2(vs2, rs1, vl);
 }
-vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m1(op1, op2, vl);
+vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m1(vs2, vs1, vl);
 }
-vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m1(op1, op2, vl);
+vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m1(vs2, rs1, vl);
 }
-vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m2(op1, op2, vl);
+vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m2(vs2, vs1, vl);
 }
-vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m2(op1, op2, vl);
+vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m2(vs2, rs1, vl);
 }
-vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m4(op1, op2, vl);
+vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m4(vs2, vs1, vl);
 }
-vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m4(op1, op2, vl);
+vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m4(vs2, rs1, vl);
 }
-vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m8(op1, op2, vl);
+vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m8(vs2, vs1, vl);
 }
-vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m8(op1, op2, vl);
+vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m8(vs2, rs1, vl);
 }
-vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32mf2(vs2, vs1, vl);
 }
-vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32mf2(vs2, rs1, vl);
 }
-vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m1(op1, op2, vl);
+vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m1(vs2, vs1, vl);
 }
-vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m1(op1, op2, vl);
+vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m1(vs2, rs1, vl);
 }
-vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m2(op1, op2, vl);
+vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m2(vs2, vs1, vl);
 }
-vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m2(op1, op2, vl);
+vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m2(vs2, rs1, vl);
 }
-vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m4(op1, op2, vl);
+vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m4(vs2, vs1, vl);
 }
-vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m4(op1, op2, vl);
+vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m4(vs2, rs1, vl);
 }
-vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m8(op1, op2, vl);
+vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m8(vs2, vs1, vl);
 }
-vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m8(op1, op2, vl);
+vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m8(vs2, rs1, vl);
 }
-vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m1(op1, op2, vl);
+vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m1(vs2, vs1, vl);
 }
-vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m1(op1, op2, vl);
+vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m1(vs2, rs1, vl);
 }
-vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m2(op1, op2, vl);
+vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m2(vs2, vs1, vl);
 }
-vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m2(op1, op2, vl);
+vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m2(vs2, rs1, vl);
 }
-vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m4(op1, op2, vl);
+vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m4(vs2, vs1, vl);
 }
-vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m4(op1, op2, vl);
+vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m4(vs2, rs1, vl);
 }
-vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m8(op1, op2, vl);
+vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m8(vs2, vs1, vl);
 }
-vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m8(op1, op2, vl);
+vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m8(vs2, rs1, vl);
 }
-vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf8_m(vm, vs2, vs1, vl);
 }
-vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf8_m(vm, vs2, rs1, vl);
 }
-vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf4_m(vm, vs2, vs1, vl);
 }
-vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf4_m(vm, vs2, rs1, vl);
 }
-vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf2_m(vm, vs2, vs1, vl);
 }
-vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf2_m(vm, vs2, rs1, vl);
 }
-vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vsub_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m1_m(vm, vs2, vs1, vl);
 }
-vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vsub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m1_m(vm, vs2, rs1, vl);
 }
-vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vsub_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m2_m(vm, vs2, vs1, vl);
 }
-vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vsub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m2_m(vm, vs2, rs1, vl);
 }
-vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vsub_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m4_m(vm, vs2, vs1, vl);
 }
-vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vsub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m4_m(vm, vs2, rs1, vl);
 }
-vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vsub_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m8_m(vm, vs2, vs1, vl);
 }
-vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vsub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m8_m(vm, vs2, rs1, vl);
 }
-vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf4_m(vm, vs2, vs1, vl);
 }
-vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf4_m(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf2_m(vm, vs2, vs1, vl);
 }
-vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf2_m(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vsub_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m1_m(vm, vs2, vs1, vl);
 }
-vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vsub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m1_m(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vsub_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m2_m(vm, vs2, vs1, vl);
 }
-vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vsub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m2_m(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vsub_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m4_m(vm, vs2, vs1, vl);
 }
-vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vsub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m4_m(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vsub_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m8_m(vm, vs2, vs1, vl);
 }
-vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vsub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m8_m(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32mf2_m(vm, vs2, vs1, vl);
 }
-vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32mf2_m(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vsub_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m1_m(vm, vs2, vs1, vl);
 }
-vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vsub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m1_m(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vsub_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m2_m(vm, vs2, vs1, vl);
 }
-vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vsub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m2_m(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vsub_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m4_m(vm, vs2, vs1, vl);
 }
-vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vsub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vsub_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vsub_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vsub_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vsub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vsub_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vsub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vsub_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vsub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vsub_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vsub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m8_m(vm, vs2, rs1, vl);
 }
-vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf8_m(vm, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf8_m(vm, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf4_m(vm, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-
return __riscv_vsub_vx_u8mf4_m(mask, op1, op2, vl); +vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8mf4_m(vm, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsub_vv_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_u8mf2_m(vm, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8mf2_m(mask, op1, op2, vl); +vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8mf2_m(vm, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsub_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m1_m(vm, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m1_m(vm, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsub_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m2_m(vm, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m2_m(vm, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsub_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m4_m(vm, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m4_m(vm, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsub_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m8_m(vm, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m8_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsub_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsub_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t 
test_vsub_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsub_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsub_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsub_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsub_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsub_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsub_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vsub_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsub_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsub_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsub_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsub_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsub_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsub_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - 
return __riscv_vsub_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsub_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsub_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vsub\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxei16.c b/auto-generated/gnu-api-tests/vsuxei16.c index 96874f0f0..5bb1403e8 100644 --- a/auto-generated/gnu-api-tests/vsuxei16.c +++ b/auto-generated/gnu-api-tests/vsuxei16.c @@ -6,460 +6,460 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxei16_v_f16mf4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_f16mf4(base, bindex, value, vl); +void test_vsuxei16_v_f16mf4(float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16mf2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_f16mf2(base, bindex, value, vl); +void test_vsuxei16_v_f16mf2(float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m1(float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m1(base, bindex, value, vl); +void test_vsuxei16_v_f16m1(float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m2(float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m2(base, bindex, value, vl); +void test_vsuxei16_v_f16m2(float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m4(float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m4(base, bindex, value, vl); +void test_vsuxei16_v_f16m4(float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m8(float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m8(base, bindex, value, vl); +void test_vsuxei16_v_f16m8(float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t 
vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32mf2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_f32mf2(base, bindex, value, vl); +void test_vsuxei16_v_f32mf2(float32_t *rs1, vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m1(float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m1(base, bindex, value, vl); +void test_vsuxei16_v_f32m1(float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m2(float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m2(base, bindex, value, vl); +void test_vsuxei16_v_f32m2(float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m4(float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m4(base, bindex, value, vl); +void test_vsuxei16_v_f32m4(float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m8(float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m8(base, bindex, value, vl); +void test_vsuxei16_v_f32m8(float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m1(float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m1(base, bindex, value, vl); +void test_vsuxei16_v_f64m1(float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m2(float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m2(base, bindex, value, vl); +void test_vsuxei16_v_f64m2(float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m4(float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m4(base, bindex, value, vl); +void test_vsuxei16_v_f64m4(float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m8(float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m8(base, bindex, value, vl); +void test_vsuxei16_v_f64m8(float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei16_v_i8mf8(base, bindex, value, vl); +void test_vsuxei16_v_i8mf8(int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_i8mf4(base, bindex, value, vl); +void test_vsuxei16_v_i8mf4(int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return 
__riscv_vsuxei16_v_i8mf2(base, bindex, value, vl); +void test_vsuxei16_v_i8mf2(int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i8m1(base, bindex, value, vl); +void test_vsuxei16_v_i8m1(int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei16_v_i8m2(base, bindex, value, vl); +void test_vsuxei16_v_i8m2(int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i8m4(base, bindex, value, vl); +void test_vsuxei16_v_i8m4(int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_i16mf4(base, bindex, value, vl); +void test_vsuxei16_v_i16mf4(int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_i16mf2(base, bindex, value, vl); +void test_vsuxei16_v_i16mf2(int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m1(base, bindex, value, vl); +void test_vsuxei16_v_i16m1(int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m2(base, bindex, value, vl); +void test_vsuxei16_v_i16m2(int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m4(base, bindex, value, vl); +void test_vsuxei16_v_i16m4(int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m8(base, bindex, value, vl); +void test_vsuxei16_v_i16m8(int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_i32mf2(base, bindex, value, vl); +void test_vsuxei16_v_i32mf2(int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i32m1(base, bindex, value, vl); +void test_vsuxei16_v_i32m1(int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, 
size_t vl) { - return __riscv_vsuxei16_v_i32m2(base, bindex, value, vl); +void test_vsuxei16_v_i32m2(int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i32m4(base, bindex, value, vl); +void test_vsuxei16_v_i32m4(int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei16_v_i32m8(base, bindex, value, vl); +void test_vsuxei16_v_i32m8(int32_t *rs1, vuint16m4_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m1(base, bindex, value, vl); +void test_vsuxei16_v_i64m1(int64_t *rs1, vuint16mf4_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m2(base, bindex, value, vl); +void test_vsuxei16_v_i64m2(int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m4(base, bindex, value, vl); +void test_vsuxei16_v_i64m4(int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m8(base, bindex, value, vl); +void test_vsuxei16_v_i64m8(int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei16_v_u8mf8(base, bindex, value, vl); +void test_vsuxei16_v_u8mf8(uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_u8mf4(base, bindex, value, vl); +void test_vsuxei16_v_u8mf4(uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_u8mf2(base, bindex, value, vl); +void test_vsuxei16_v_u8mf2(uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u8m1(base, bindex, value, vl); +void test_vsuxei16_v_u8m1(uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u8m2(base, bindex, value, vl); +void test_vsuxei16_v_u8m2(uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t 
bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u8m4(base, bindex, value, vl); +void test_vsuxei16_v_u8m4(uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_u16mf4(base, bindex, value, vl); +void test_vsuxei16_v_u16mf4(uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_u16mf2(base, bindex, value, vl); +void test_vsuxei16_v_u16mf2(uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m1(base, bindex, value, vl); +void test_vsuxei16_v_u16m1(uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m2(base, bindex, value, vl); +void test_vsuxei16_v_u16m2(uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m4(base, bindex, value, vl); +void test_vsuxei16_v_u16m4(uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m8(base, bindex, value, vl); +void test_vsuxei16_v_u16m8(uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_u32mf2(base, bindex, value, vl); +void test_vsuxei16_v_u32mf2(uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m1(base, bindex, value, vl); +void test_vsuxei16_v_u32m1(uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m2(base, bindex, value, vl); +void test_vsuxei16_v_u32m2(uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m4(base, bindex, value, vl); +void test_vsuxei16_v_u32m4(uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m8(base, bindex, value, vl); +void test_vsuxei16_v_u32m8(uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return 
__riscv_vsuxei16_v_u32m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m1(base, bindex, value, vl); +void test_vsuxei16_v_u64m1(uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m1(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m2(base, bindex, value, vl); +void test_vsuxei16_v_u64m2(uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m2(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m4(base, bindex, value, vl); +void test_vsuxei16_v_u64m4(uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m4(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m8(base, bindex, value, vl); +void test_vsuxei16_v_u64m8(uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m8(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_f16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_f16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m1_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m4_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m8_m(vbool2_t mask, float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsuxei16_v_f16m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_f32mf2_m(mask, 
base, bindex, value, vl); +void test_vsuxei16_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m1_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m4_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m8_m(vbool4_t mask, float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei16_v_f32m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m1_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m4_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m8_m(vbool8_t mask, float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei16_v_f64m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_f64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei16_v_i8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_i8mf4_m(mask, base, bindex, value, 
vl); +void test_vsuxei16_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_i8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i8m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei16_v_i8m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i8m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i8m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_i16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_i16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsuxei16_v_i16m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + 
return __riscv_vsuxei16_v_i16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_i32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i32m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei16_v_i32m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i32m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei16_v_i32m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint16m4_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei16_v_i64m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_i64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei16_v_u8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8mf8_m(vm, rs1, rs2, vs3, vl); } -void 
test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_u8mf4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_u8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u8m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u8m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u8m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u8m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei16_v_u16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_u16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, 
vuint16m8_t value, size_t vl) { - return __riscv_vsuxei16_v_u16m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei16_v_u32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei16_v_u32m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei16_v_u64m8_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei16_v_u64m8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei16\.[ivxfswum.]+\s+} 114 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxei32.c b/auto-generated/gnu-api-tests/vsuxei32.c index e48937410..ef1531ed6 100644 --- a/auto-generated/gnu-api-tests/vsuxei32.c +++ b/auto-generated/gnu-api-tests/vsuxei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxei32_v_f16mf4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_f16mf4(base, bindex, value, vl); +void test_vsuxei32_v_f16mf4(float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16mf2(float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_f16mf2(base, bindex, value, vl); +void test_vsuxei32_v_f16mf2(float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m1(float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei32_v_f16m1(base, bindex, value, vl); +void test_vsuxei32_v_f16m1(float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m2(float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei32_v_f16m2(base, bindex, value, vl); +void test_vsuxei32_v_f16m2(float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m4(float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsuxei32_v_f16m4(base, bindex, value, vl); +void test_vsuxei32_v_f16m4(float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32mf2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_f32mf2(base, bindex, value, vl); +void test_vsuxei32_v_f32mf2(float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m1(float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m1(base, bindex, value, vl); +void test_vsuxei32_v_f32m1(float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m2(float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m2(base, bindex, value, vl); +void test_vsuxei32_v_f32m2(float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m4(float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m4(base, bindex, value, vl); +void test_vsuxei32_v_f32m4(float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m8(float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m8(base, bindex, value, vl); +void test_vsuxei32_v_f32m8(float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m8(rs1, rs2, vs3, vl); } -void 
test_vsuxei32_v_f64m1(float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m1(base, bindex, value, vl); +void test_vsuxei32_v_f64m1(float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m2(float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m2(base, bindex, value, vl); +void test_vsuxei32_v_f64m2(float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m4(float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m4(base, bindex, value, vl); +void test_vsuxei32_v_f64m4(float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m8(float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m8(base, bindex, value, vl); +void test_vsuxei32_v_f64m8(float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m8(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei32_v_i8mf8(base, bindex, value, vl); +void test_vsuxei32_v_i8mf8(int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_i8mf4(base, bindex, value, vl); +void test_vsuxei32_v_i8mf4(int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_i8mf2(base, bindex, value, vl); +void test_vsuxei32_v_i8mf2(int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i8m1(base, bindex, value, vl); +void test_vsuxei32_v_i8m1(int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i8m2(base, bindex, value, vl); +void test_vsuxei32_v_i8m2(int8_t *rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_i16mf4(base, bindex, value, vl); +void test_vsuxei32_v_i16mf4(int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_i16mf2(base, bindex, value, vl); +void test_vsuxei32_v_i16mf2(int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i16m1(base, bindex, value, vl); +void test_vsuxei32_v_i16m1(int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) { + return 
__riscv_vsuxei32_v_i16m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i16m2(base, bindex, value, vl); +void test_vsuxei32_v_i16m2(int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei32_v_i16m4(base, bindex, value, vl); +void test_vsuxei32_v_i16m4(int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_i32mf2(base, bindex, value, vl); +void test_vsuxei32_v_i32mf2(int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m1(base, bindex, value, vl); +void test_vsuxei32_v_i32m1(int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m2(base, bindex, value, vl); +void test_vsuxei32_v_i32m2(int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m4(base, bindex, value, vl); +void test_vsuxei32_v_i32m4(int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m8(base, bindex, value, vl); +void test_vsuxei32_v_i32m8(int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m8(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m1(base, bindex, value, vl); +void test_vsuxei32_v_i64m1(int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m2(base, bindex, value, vl); +void test_vsuxei32_v_i64m2(int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m4(base, bindex, value, vl); +void test_vsuxei32_v_i64m4(int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m8(base, bindex, value, vl); +void test_vsuxei32_v_i64m8(int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m8(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei32_v_u8mf8(base, bindex, value, vl); +void test_vsuxei32_v_u8mf8(uint8_t *rs1, vuint32mf2_t rs2, 
vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_u8mf4(base, bindex, value, vl); +void test_vsuxei32_v_u8mf4(uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_u8mf2(base, bindex, value, vl); +void test_vsuxei32_v_u8mf2(uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei32_v_u8m1(base, bindex, value, vl); +void test_vsuxei32_v_u8m1(uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei32_v_u8m2(base, bindex, value, vl); +void test_vsuxei32_v_u8m2(uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_u16mf4(base, bindex, value, vl); +void test_vsuxei32_v_u16mf4(uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_u16mf2(base, bindex, value, vl); +void test_vsuxei32_v_u16mf2(uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei32_v_u16m1(base, bindex, value, vl); +void test_vsuxei32_v_u16m1(uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei32_v_u16m2(base, bindex, value, vl); +void test_vsuxei32_v_u16m2(uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei32_v_u16m4(base, bindex, value, vl); +void test_vsuxei32_v_u16m4(uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_u32mf2(base, bindex, value, vl); +void test_vsuxei32_v_u32mf2(uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei32_v_u32m1(base, bindex, value, vl); +void test_vsuxei32_v_u32m1(uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u32m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei32_v_u32m2(base, bindex, 
value, vl); +void test_vsuxei32_v_u32m2(uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u32m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei32_v_u32m4(base, bindex, value, vl); +void test_vsuxei32_v_u32m4(uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u32m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei32_v_u32m8(base, bindex, value, vl); +void test_vsuxei32_v_u32m8(uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u32m8(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei32_v_u64m1(base, bindex, value, vl); +void test_vsuxei32_v_u64m1(uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u64m1(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei32_v_u64m2(base, bindex, value, vl); +void test_vsuxei32_v_u64m2(uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u64m2(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei32_v_u64m4(base, bindex, value, vl); +void test_vsuxei32_v_u64m4(uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u64m4(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei32_v_u64m8(base, bindex, value, vl); +void test_vsuxei32_v_u64m8(uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u64m8(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_f16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_f16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m1_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei32_v_f16m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei32_v_f16m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m4_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return 
__riscv_vsuxei32_v_f16m4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m1_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m4_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m8_m(vbool4_t mask, float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei32_v_f32m8_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m1_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m4_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m8_m(vbool8_t mask, float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei32_v_f64m8_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_f64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return 
__riscv_vsuxei32_v_i8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_i8mf4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_i8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i8m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i8m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_i16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_i16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i16m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i16m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei32_v_i16m4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_i32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i32mf2_m(vbool64_t vm, 
int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei32_v_i32m8_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei32_v_i64m8_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_i64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei32_v_u8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_u8mf4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) { + return 
__riscv_vsuxei32_v_u8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_u8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei32_v_u8m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei32_v_u8m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei32_v_u16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_u16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei32_v_u16m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei32_v_u16m2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei32_v_u16m4_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei32_v_u32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei32_v_u32m1_m(mask, base, bindex, value, vl); +void test_vsuxei32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei32_v_u32m1_m(vm, rs1, rs2, vs3, vl); } -void 
test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsuxei32_v_u32m2_m(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsuxei32_v_u32m2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsuxei32_v_u32m4_m(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsuxei32_v_u32m4_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return __riscv_vsuxei32_v_u32m8_m(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsuxei32_v_u32m8_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsuxei32_v_u64m1_m(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsuxei32_v_u64m1_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsuxei32_v_u64m2_m(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsuxei32_v_u64m2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsuxei32_v_u64m4_m(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsuxei32_v_u64m4_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsuxei32_v_u64m8_m(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsuxei32_v_u64m8_m(vm, rs1, rs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxei64.c b/auto-generated/gnu-api-tests/vsuxei64.c
index ef39c1e5d..bb685471d 100644
--- a/auto-generated/gnu-api-tests/vsuxei64.c
+++ b/auto-generated/gnu-api-tests/vsuxei64.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsuxei64_v_f16mf4(float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsuxei64_v_f16mf4(base, bindex, value, vl);
+void test_vsuxei64_v_f16mf4(float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_f16mf4(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_f16mf2(float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsuxei64_v_f16mf2(base, bindex, value, vl);
+void test_vsuxei64_v_f16mf2(float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_f16mf2(rs1, rs2, vs3, vl);
 }
 
-void
test_vsuxei64_v_f16m1(float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei64_v_f16m1(base, bindex, value, vl); +void test_vsuxei64_v_f16m1(float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f16m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16m2(float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei64_v_f16m2(base, bindex, value, vl); +void test_vsuxei64_v_f16m2(float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f16m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32mf2(float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_f32mf2(base, bindex, value, vl); +void test_vsuxei64_v_f32mf2(float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m1(float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei64_v_f32m1(base, bindex, value, vl); +void test_vsuxei64_v_f32m1(float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m2(float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei64_v_f32m2(base, bindex, value, vl); +void test_vsuxei64_v_f32m2(float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m4(float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei64_v_f32m4(base, bindex, value, vl); +void test_vsuxei64_v_f32m4(float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32m4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m1(float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m1(base, bindex, value, vl); +void test_vsuxei64_v_f64m1(float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m2(float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m2(base, bindex, value, vl); +void test_vsuxei64_v_f64m2(float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m4(float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m4(base, bindex, value, vl); +void test_vsuxei64_v_f64m4(float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m8(float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m8(base, bindex, value, vl); +void test_vsuxei64_v_f64m8(float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m8(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei64_v_i8mf8(base, bindex, value, vl); +void test_vsuxei64_v_i8mf8(int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_i8mf4(base, bindex, value, vl); +void test_vsuxei64_v_i8mf4(int8_t *rs1, 
vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_i8mf2(base, bindex, value, vl); +void test_vsuxei64_v_i8mf2(int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i8m1(base, bindex, value, vl); +void test_vsuxei64_v_i8m1(int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_i16mf4(base, bindex, value, vl); +void test_vsuxei64_v_i16mf4(int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_i16mf2(base, bindex, value, vl); +void test_vsuxei64_v_i16mf2(int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i16m1(base, bindex, value, vl); +void test_vsuxei64_v_i16m1(int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei64_v_i16m2(base, bindex, value, vl); +void test_vsuxei64_v_i16m2(int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_i32mf2(base, bindex, value, vl); +void test_vsuxei64_v_i32mf2(int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i32m1(base, bindex, value, vl); +void test_vsuxei64_v_i32m1(int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei64_v_i32m2(base, bindex, value, vl); +void test_vsuxei64_v_i32m2(int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei64_v_i32m4(base, bindex, value, vl); +void test_vsuxei64_v_i32m4(int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32m4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m1(base, bindex, value, vl); +void test_vsuxei64_v_i64m1(int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m2(base, bindex, value, vl); +void 
test_vsuxei64_v_i64m2(int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m4(base, bindex, value, vl); +void test_vsuxei64_v_i64m4(int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m8(base, bindex, value, vl); +void test_vsuxei64_v_i64m8(int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m8(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei64_v_u8mf8(base, bindex, value, vl); +void test_vsuxei64_v_u8mf8(uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_u8mf4(base, bindex, value, vl); +void test_vsuxei64_v_u8mf4(uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_u8mf2(base, bindex, value, vl); +void test_vsuxei64_v_u8mf2(uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei64_v_u8m1(base, bindex, value, vl); +void test_vsuxei64_v_u8m1(uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_u16mf4(base, bindex, value, vl); +void test_vsuxei64_v_u16mf4(uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_u16mf2(base, bindex, value, vl); +void test_vsuxei64_v_u16mf2(uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei64_v_u16m1(base, bindex, value, vl); +void test_vsuxei64_v_u16m1(uint16_t *rs1, vuint64m4_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u16m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei64_v_u16m2(base, bindex, value, vl); +void test_vsuxei64_v_u16m2(uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u16m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_u32mf2(base, bindex, value, vl); +void test_vsuxei64_v_u32mf2(uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { - 
return __riscv_vsuxei64_v_u32m1(base, bindex, value, vl); +void test_vsuxei64_v_u32m1(uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u32m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei64_v_u32m2(base, bindex, value, vl); +void test_vsuxei64_v_u32m2(uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u32m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei64_v_u32m4(base, bindex, value, vl); +void test_vsuxei64_v_u32m4(uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u32m4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei64_v_u64m1(base, bindex, value, vl); +void test_vsuxei64_v_u64m1(uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u64m1(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei64_v_u64m2(base, bindex, value, vl); +void test_vsuxei64_v_u64m2(uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u64m2(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei64_v_u64m4(base, bindex, value, vl); +void test_vsuxei64_v_u64m4(uint64_t *rs1, vuint64m4_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u64m4(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei64_v_u64m8(base, bindex, value, vl); +void test_vsuxei64_v_u64m8(uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u64m8(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_f16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_f16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16m1_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei64_v_f16m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16m2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei64_v_f16m2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t 
vl) { - return __riscv_vsuxei64_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m1_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei64_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei64_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m4_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei64_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m1_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m2_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m4_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m8_m(vbool8_t mask, float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei64_v_f64m8_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_f64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei64_v_i8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_i8mf4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { - return 
__riscv_vsuxei64_v_i8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i8m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_i16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_i16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i16m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei64_v_i16m2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_i32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i32m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei64_v_i32m2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei64_v_i32m4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m1_m(mask, base, bindex, value, vl); +void 
test_vsuxei64_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei64_v_i64m8_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_i64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei64_v_u8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_u8mf4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_u8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei64_v_u8m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei64_v_u16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei64_v_u16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64_v_u16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei64_v_u16m1_m(mask, base, bindex, value, vl); +void test_vsuxei64_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t rs2, 
vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u16m1_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u16m2_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u16m2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u32mf2_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u32mf2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u32m1_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u32m1_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u32m2_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u32m2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u32m4_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u32m4_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u64m1_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u64m1_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u64m2_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u64m2_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u64m4_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u64m4_m(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsuxei64_v_u64m8_m(mask, base, bindex, value, vl);
+void test_vsuxei64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsuxei64_v_u64m8_m(vm, rs1, rs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei64\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxei8.c b/auto-generated/gnu-api-tests/vsuxei8.c
index ebb34c86b..5ecf03df4 100644
--- a/auto-generated/gnu-api-tests/vsuxei8.c
+++ b/auto-generated/gnu-api-tests/vsuxei8.c
@@ -6,476 +6,476 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsuxei8_v_f16mf4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f16mf4(base, bindex, value, vl);
+void test_vsuxei8_v_f16mf4(float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f16mf4(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f16mf2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f16mf2(base, bindex, value, vl);
+void test_vsuxei8_v_f16mf2(float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f16mf2(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f16m1(float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f16m1(base, bindex, value, vl);
+void test_vsuxei8_v_f16m1(float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f16m1(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f16m2(float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f16m2(base, bindex, value, vl);
+void test_vsuxei8_v_f16m2(float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f16m2(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f16m4(float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f16m4(base, bindex, value, vl);
+void test_vsuxei8_v_f16m4(float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f16m4(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f16m8(float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f16m8(base, bindex, value, vl);
+void test_vsuxei8_v_f16m8(float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f16m8(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f32mf2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f32mf2(base, bindex, value, vl);
+void test_vsuxei8_v_f32mf2(float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f32mf2(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f32m1(float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f32m1(base, bindex, value, vl);
+void test_vsuxei8_v_f32m1(float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f32m1(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f32m2(float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f32m2(base, bindex, value, vl);
+void test_vsuxei8_v_f32m2(float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f32m2(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f32m4(float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f32m4(base, bindex, value, vl);
+void test_vsuxei8_v_f32m4(float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f32m4(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f32m8(float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
-  return __riscv_vsuxei8_v_f32m8(base, bindex, value, vl);
+void test_vsuxei8_v_f32m8(float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8_v_f32m8(rs1, rs2, vs3, vl);
 }
 
-void test_vsuxei8_v_f64m1(float64_t
*base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m1(base, bindex, value, vl); +void test_vsuxei8_v_f64m1(float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m2(float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m2(base, bindex, value, vl); +void test_vsuxei8_v_f64m2(float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m4(float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m4(base, bindex, value, vl); +void test_vsuxei8_v_f64m4(float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m8(float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m8(base, bindex, value, vl); +void test_vsuxei8_v_f64m8(float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei8_v_i8mf8(base, bindex, value, vl); +void test_vsuxei8_v_i8mf8(int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_i8mf4(base, bindex, value, vl); +void test_vsuxei8_v_i8mf4(int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_i8mf2(base, bindex, value, vl); +void test_vsuxei8_v_i8mf2(int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m1(base, bindex, value, vl); +void test_vsuxei8_v_i8m1(int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m2(base, bindex, value, vl); +void test_vsuxei8_v_i8m2(int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m4(base, bindex, value, vl); +void test_vsuxei8_v_i8m4(int8_t *rs1, vuint8m4_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m8(base, bindex, value, vl); +void test_vsuxei8_v_i8m8(int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_i16mf4(base, bindex, value, vl); +void test_vsuxei8_v_i16mf4(int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, 
vint16mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_i16mf2(base, bindex, value, vl); +void test_vsuxei8_v_i16mf2(int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m1(base, bindex, value, vl); +void test_vsuxei8_v_i16m1(int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m2(base, bindex, value, vl); +void test_vsuxei8_v_i16m2(int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m4(base, bindex, value, vl); +void test_vsuxei8_v_i16m4(int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m8(base, bindex, value, vl); +void test_vsuxei8_v_i16m8(int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_i32mf2(base, bindex, value, vl); +void test_vsuxei8_v_i32mf2(int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m1(base, bindex, value, vl); +void test_vsuxei8_v_i32m1(int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m2(base, bindex, value, vl); +void test_vsuxei8_v_i32m2(int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m4(base, bindex, value, vl); +void test_vsuxei8_v_i32m4(int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m8(base, bindex, value, vl); +void test_vsuxei8_v_i32m8(int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i64m1(base, bindex, value, vl); +void test_vsuxei8_v_i64m1(int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i64m2(base, bindex, value, vl); +void test_vsuxei8_v_i64m2(int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t 
vl) { - return __riscv_vsuxei8_v_i64m4(base, bindex, value, vl); +void test_vsuxei8_v_i64m4(int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i64m8(base, bindex, value, vl); +void test_vsuxei8_v_i64m8(int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei8_v_u8mf8(base, bindex, value, vl); +void test_vsuxei8_v_u8mf8(uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8mf8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_u8mf4(base, bindex, value, vl); +void test_vsuxei8_v_u8mf4(uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8mf4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_u8mf2(base, bindex, value, vl); +void test_vsuxei8_v_u8mf2(uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8mf2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m1(base, bindex, value, vl); +void test_vsuxei8_v_u8m1(uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m2(base, bindex, value, vl); +void test_vsuxei8_v_u8m2(uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m4(base, bindex, value, vl); +void test_vsuxei8_v_u8m4(uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m8(base, bindex, value, vl); +void test_vsuxei8_v_u8m8(uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_u16mf4(base, bindex, value, vl); +void test_vsuxei8_v_u16mf4(uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16mf4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_u16mf2(base, bindex, value, vl); +void test_vsuxei8_v_u16mf2(uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16mf2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u16m1(base, bindex, value, vl); +void test_vsuxei8_v_u16m1(uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return 
__riscv_vsuxei8_v_u16m2(base, bindex, value, vl); +void test_vsuxei8_v_u16m2(uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u16m4(base, bindex, value, vl); +void test_vsuxei8_v_u16m4(uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u16m8(base, bindex, value, vl); +void test_vsuxei8_v_u16m8(uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_u32mf2(base, bindex, value, vl); +void test_vsuxei8_v_u32mf2(uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32mf2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m1(base, bindex, value, vl); +void test_vsuxei8_v_u32m1(uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m2(base, bindex, value, vl); +void test_vsuxei8_v_u32m2(uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m4(base, bindex, value, vl); +void test_vsuxei8_v_u32m4(uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m8(base, bindex, value, vl); +void test_vsuxei8_v_u32m8(uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m1(base, bindex, value, vl); +void test_vsuxei8_v_u64m1(uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m1(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m2(base, bindex, value, vl); +void test_vsuxei8_v_u64m2(uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m2(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m4(base, bindex, value, vl); +void test_vsuxei8_v_u64m4(uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m4(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m8(base, bindex, value, vl); +void test_vsuxei8_v_u64m8(uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, 
vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_f16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_f16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m1_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei8_v_f16m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei8_v_f16m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m4_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsuxei8_v_f16m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m8_m(vbool2_t mask, float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsuxei8_v_f16m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m1_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei8_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei8_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m4_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei8_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m8_m(vbool4_t mask, float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return 
__riscv_vsuxei8_v_f32m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m1_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m4_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m8_m(vbool8_t mask, float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei8_v_f64m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_f64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei8_v_i8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_i8mf4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_i8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint8m4_t rs2, vint8m4_t 
vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i8m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i8m8_m(vbool1_t vm, int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i8m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_i16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_i16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i16m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_i32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, 
vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i32m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei8_v_i64m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei8_v_i64m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei8_v_i64m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei8_v_i64m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_i64m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei8_v_u8mf8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8mf8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_u8mf4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_u8mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m2_m(mask, base, bindex, value, vl); +void 
test_vsuxei8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u8m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u8m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei8_v_u16mf4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16mf4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_u16mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u16m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei8_v_u16m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u16m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u16m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u16m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei8_v_u32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32mf2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) { + return 
__riscv_vsuxei8_v_u32m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u32m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u32m8_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m1_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m2_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m4_m(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei8_v_u64m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei8_v_u64m8_m(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei8\.[ivxfswum.]+\s+} 118 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg2ei16.c b/auto-generated/gnu-api-tests/vsuxseg2ei16.c index a92fb4274..86f5e48f0 100644 --- a/auto-generated/gnu-api-tests/vsuxseg2ei16.c +++ b/auto-generated/gnu-api-tests/vsuxseg2ei16.c @@ -6,388 +6,388 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg2ei16_v_f16mf4x2(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16mf4x2(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16mf2x2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16mf2x2(float16_t *rs1, vuint16mf2_t vs2, 
vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16m1x2(float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16m1x2(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16m2x2(float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16m2x2(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16m4x2(float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16m4x2(float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32mf2x2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32mf2x2(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32m1x2(float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32m1x2(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32m2x2(float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32m2x2(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32m4x2(float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32m4x2(float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f64m1x2(float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f64m1x2(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f64m2x2(float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f64m2x2(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f64m4x2(float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f64m4x2(float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t 
bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf8x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8mf8x2(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf8x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8mf4x2(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8mf2x2(int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8m1x2(int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8m2x2(int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8m4x2(int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16mf4x2(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16mf2x2(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16m1x2(int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16m2x2(int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16m4x2(int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m4x2(rs1, vs2, vs3, 
vl); } -void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32mf2x2(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32m1x2(int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32m2x2(int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32m4x2(int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i64m1x2(int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i64m2x2(int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i64m4x2(int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8mf8x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8mf8x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8m1x2(uint8_t *rs1, 
vuint16m2_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8m2x2(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8m4x2(uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16m1x2(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16m2x2(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16m4x2(uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32m1x2(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32m2x2(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, 
size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32m4x2(uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u64m1x2(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u64m2x2(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u64m4x2(uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return 
__riscv_vsuxseg2ei16_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg2ei16_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t 
vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m1x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m4x2_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei16\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg2ei32.c b/auto-generated/gnu-api-tests/vsuxseg2ei32.c index b6861f99b..82c4342be 100644 --- a/auto-generated/gnu-api-tests/vsuxseg2ei32.c +++ b/auto-generated/gnu-api-tests/vsuxseg2ei32.c @@ -6,372 +6,372 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg2ei32_v_f16mf4x2(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16mf4x2(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16mf2x2(float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16mf2x2(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16m1x2(float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16m1x2(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16m2x2(float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg2ei32_v_f16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16m2x2(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16m4x2(float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16m4x2(float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32mf2x2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32mf2x2(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32m1x2(float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32m1x2(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32m2x2(float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32m2x2(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32m4x2(float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32m4x2(float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f64m1x2(float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f64m1x2(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f64m2x2(float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f64m2x2(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f64m4x2(float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f64m4x2(float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf8x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8mf8x2(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf8x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8mf4x2(int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) { + return 
__riscv_vsuxseg2ei32_v_i8mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8mf2x2(int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8m1x2(int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8m2x2(int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16mf4x2(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16mf2x2(int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16m1x2(int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16m2x2(int16_t *rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16m4x2(int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i32mf2x2(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i32m1x2(int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m2x2(base, bindex, v_tuple, vl); +void 
test_vsuxseg2ei32_v_i32m2x2(int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i32m4x2(int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i64m1x2(int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i64m2x2(int64_t *rs1, vuint32m1_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i64m4x2(int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf8x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf8x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8m1x2(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8m2x2(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t 
v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m1x2(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m2x2(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m4x2(uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32m1x2(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32m2x2(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32m4x2(uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u64m1x2(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u64m2x2(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u64m4x2(uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return 
__riscv_vsuxseg2ei32_v_u64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m4x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg2ei32_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t 
vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m1x2_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m4x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m1x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m2x2_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m4x2_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei32\.[ivxfswum.]+\s+} 92 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg2ei64.c b/auto-generated/gnu-api-tests/vsuxseg2ei64.c index 9c317da15..ef0051b66 100644 --- a/auto-generated/gnu-api-tests/vsuxseg2ei64.c +++ b/auto-generated/gnu-api-tests/vsuxseg2ei64.c @@ -6,332 +6,332 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg2ei64_v_f16mf4x2(float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16mf4x2(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f16mf2x2(float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16mf2x2(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f16m1x2(float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16m1x2(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f16m2x2(float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16m2x2(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32mf2x2(float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32mf2x2(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m1x2(float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m1x2(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m2x2(float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m2x2(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m4x2(float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m4x2(float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m1x2(float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m1x2(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f64m1x2(rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei64_v_f64m2x2(float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m2x2(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m4x2(float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_f64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m4x2(float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_f64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8mf8x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf8x2(int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8mf8x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf4x2(int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf2x2(int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8m1x2(int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16mf4x2(int16_t *rs1, vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16mf2x2(int16_t *rs1, vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16m1x2(int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16m2x2(int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32mf2x2(int32_t *rs1, vuint64m1_t vs2, 
vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m1x2(int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m2x2(int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m4x2(int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m1x2(int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m2x2(int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_i64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m4x2(int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_i64m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8mf8x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u8mf8x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8mf4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u8mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u8mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8m1x2(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u8m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u16mf4x2(base, 
bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u16mf4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u16mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u16mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u16m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16m1x2(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u16m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u16m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16m2x2(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u16m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u32mf2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u32mf2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u32m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m1x2(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u32m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u32m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m2x2(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u32m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u32m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m4x2(uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u32m4x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u64m1x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u64m1x2(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u64m1x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u64m2x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u64m2x2(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u64m2x2(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64_v_u64m4x2(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u64m4x2(uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64_v_u64m4x2(rs1, vs2, vs3, vl); } -void 
-void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f16m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f16m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f16m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f16m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f32m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f32m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f32m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f32m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f32m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f32m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f64m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f64m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f64m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f64m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f64m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f64m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i8m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i8m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i16m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i16m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i16m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i16m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i32m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i32m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i32m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i32m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i32m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i32m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i64m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i64m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i64m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i64m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_i64m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_i64m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u8m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u8m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u16m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u16m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u16m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u16m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u32m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u32m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u32m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u32m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u32m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u32m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u64m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u64m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u64m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u64m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_u64m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_u64m4x2_m(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei64\.[ivxfswum.]+\s+} 82 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg2ei8.c b/auto-generated/gnu-api-tests/vsuxseg2ei8.c
index 1dd938de4..d2a553845 100644
--- a/auto-generated/gnu-api-tests/vsuxseg2ei8.c
+++ b/auto-generated/gnu-api-tests/vsuxseg2ei8.c
@@ -6,388 +6,388 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg2ei8_v_f16mf4x2(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16mf4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16mf4x2(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16mf4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16mf2x2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16mf2x2(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16m1x2(float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m1x2(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16m2x2(float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m2x2(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16m4x2(float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m4x2(float16_t *rs1, vuint8m2_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32mf2x2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32mf2x2(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32m1x2(float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32m1x2(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32m2x2(float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32m2x2(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32m4x2(float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32m4x2(float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f64m1x2(float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f64m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f64m1x2(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f64m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f64m2x2(float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f64m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f64m2x2(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f64m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f64m4x2(float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f64m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f64m4x2(float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f64m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8mf8x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8mf8x2(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8mf8x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8mf4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8mf4x2(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8mf4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8mf2x2(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8m1x2(int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8m2x2(int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8m4x2(int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16mf4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16mf4x2(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16mf4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16mf2x2(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16m1x2(int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16m2x2(int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16m4x2(int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32mf2x2(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32m1x2(int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32m2x2(int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32m4x2(int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i64m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i64m1x2(int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i64m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i64m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i64m2x2(int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i64m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i64m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i64m4x2(int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i64m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8mf8x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8mf8x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8mf4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8mf4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8m1x2(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8m2x2(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8m4x2(uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16mf4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16mf4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16m1x2(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16m2x2(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16m4x2(uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32mf2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32mf2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32m1x2(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32m2x2(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32m4x2(uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u64m1x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u64m1x2(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u64m1x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u64m2x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u64m2x2(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u64m2x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u64m4x2(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u64m4x2(uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u64m4x2(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f16m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint8m2_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f16m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f32m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f32m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f64m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f64m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f64m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f64m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_f64m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_f64m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8mf8x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i8m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i8m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i16m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i16m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i32m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i32m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i64m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i64m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i64m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i64m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_i64m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_i64m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8mf8x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u8m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u8m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16mf4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u16m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u16m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32mf2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u32m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u32m4x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u64m1x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u64m1x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u64m2x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u64m2x2_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u64m4x2_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u64m4x2_m(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei8\.[ivxfswum.]+\s+} 96 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg3ei16.c b/auto-generated/gnu-api-tests/vsuxseg3ei16.c
index bbafc6188..f62f91d70 100644
--- a/auto-generated/gnu-api-tests/vsuxseg3ei16.c
+++ b/auto-generated/gnu-api-tests/vsuxseg3ei16.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg3ei16_v_f16mf4x3(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f16mf4x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f16mf4x3(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f16mf4x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f16mf2x3(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f16mf2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f16mf2x3(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f16mf2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f16m1x3(float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f16m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f16m1x3(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f16m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f16m2x3(float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f16m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f16m2x3(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f16m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f32mf2x3(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f32mf2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f32mf2x3(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f32mf2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f32m1x3(float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f32m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f32m1x3(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f32m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f32m2x3(float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f32m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f32m2x3(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f32m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f64m1x3(float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f64m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f64m1x3(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f64m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_f64m2x3(float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f64m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_f64m2x3(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f64m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i8mf8x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i8mf8x3(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i8mf8x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i8mf4x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i8mf4x3(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i8mf4x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i8mf2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i8mf2x3(int8_t *rs1, vuint16m1_t vs2, vint8mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i8mf2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i8m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i8m1x3(int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i8m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i8m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i8m2x3(int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i8m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i16mf4x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i16mf4x3(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i16mf4x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i16mf2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i16mf2x3(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i16mf2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i16m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i16m1x3(int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i16m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i16m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i16m2x3(int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i16m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i32mf2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i32mf2x3(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i32mf2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i32m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i32m1x3(int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i32m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i32m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i32m2x3(int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i32m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i64m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i64m1x3(int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i64m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_i64m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_i64m2x3(int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_i64m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u8mf8x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u8mf8x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u8mf4x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u8mf4x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u8mf2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u8mf2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u8m1x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u8m1x3(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u8m1x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u8m2x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u8m2x3(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u8m2x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u16mf4x3(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u16mf4x3(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
__riscv_vsuxseg3ei16_v_u16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16m1x3(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16m2x3(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32m1x3(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32m2x3(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u64m1x3(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u64m2x3(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f16m1x3_m(mask, base, bindex, 
v_tuple, vl); +void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_f64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint16m1_t 
vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i32m2x3_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_i64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei16_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16_v_u64m2x3_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg3ei32.c b/auto-generated/gnu-api-tests/vsuxseg3ei32.c index 0eea4e9f6..59f411875 100644 --- a/auto-generated/gnu-api-tests/vsuxseg3ei32.c +++ b/auto-generated/gnu-api-tests/vsuxseg3ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg3ei32_v_f16mf4x3(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16mf4x3(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16mf2x3(float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf2x3(base, 
bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16mf2x3(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16m1x3(float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16m1x3(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16m2x3(float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16m2x3(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32mf2x3(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32mf2x3(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32m1x3(float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m1x3(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32m2x3(float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m2x3(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m1x3(float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m1x3(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m2x3(float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m2x3(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf8x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf8x3(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf8x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf4x3(int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf2x3(int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf2x3(rs1, vs2, vs3, vl); } -void 
test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m1x3(int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m2x3(int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf4x3(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf2x3(int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m1x3(int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m2x3(int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32mf2x3(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m1x3(int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m2x3(int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m1x3(int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m2x3(int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t 
vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf8x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf8x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m1x3(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m2x3(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m1x3(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m2x3(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei32_v_u32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m1x3(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m2x3(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u64m1x3(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u64m2x3(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m1x3_m(vm, rs1, vs2, vs3, 
vl); } -void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei32_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, 
vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m1x3_m(vm, rs1, vs2, vs3, vl); } 
-void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32_v_u64m2x3_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg3ei64.c b/auto-generated/gnu-api-tests/vsuxseg3ei64.c index 6fa771a57..4f1df970a 100644 --- a/auto-generated/gnu-api-tests/vsuxseg3ei64.c +++ b/auto-generated/gnu-api-tests/vsuxseg3ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg3ei64_v_f16mf4x3(float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf4x3(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16mf2x3(float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf2x3(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16m1x3(float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m1x3(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16m2x3(float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m2x3(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32mf2x3(float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32mf2x3(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m1x3(float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m1x3(base, bindex, v_tuple, vl); +void 
test_vsuxseg3ei64_v_f32m1x3(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m2x3(float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32m2x3(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m1x3(float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m1x3(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m2x3(float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m2x3(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf8x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf8x3(int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf8x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf4x3(int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf2x3(int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8m1x3(int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf4x3(int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf2x3(int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m1x3(int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, 
vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m2x3(int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32mf2x3(int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32m1x3(int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32m2x3(int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m1x3(int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m2x3(int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf8x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf8x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8m1x3(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return 
__riscv_vsuxseg3ei64_v_u16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16m1x3(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16m2x3(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m1x3(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m2x3(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m1x3(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m2x3(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_f64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t 
v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t vm, 
int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m2x3_m(vm, rs1, vs2, vs3, 
vl); } -void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m2x3_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg3ei8.c b/auto-generated/gnu-api-tests/vsuxseg3ei8.c index c29dcfc06..f314ec4a1 100644 --- a/auto-generated/gnu-api-tests/vsuxseg3ei8.c +++ b/auto-generated/gnu-api-tests/vsuxseg3ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg3ei8_v_f16mf4x3(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf4x3(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16mf2x3(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf2x3(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m1x3(float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m1x3(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m2x3(float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei8_v_f16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m2x3(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32mf2x3(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32mf2x3(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m1x3(float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m1x3(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m2x3(float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m2x3(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m1x3(float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m1x3(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m2x3(float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m2x3(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf8x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf8x3(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf8x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf4x3(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf2x3(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m1x3(int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m2x3(int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, 
vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf4x3(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf2x3(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m1x3(int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m2x3(int16_t *rs1, vuint8m1_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32mf2x3(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m1x3(int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m2x3(int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m1x3(int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m2x3(int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf8x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf8x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf4x3(rs1, vs2, vs3, vl); 
} -void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m1x3(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m2x3(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf4x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf4x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m1x3(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m2x3(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32mf2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32mf2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m1x3(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m2x3(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u64m1x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u64m1x3(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, 
size_t vl) { + return __riscv_vsuxseg3ei8_v_u64m1x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u64m2x3(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u64m2x3(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u64m2x3(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint8mf4_t 
bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_f64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, 
vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf8x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, 
vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf4x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32mf2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32m2x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u64m1x3_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); +void 
test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8_v_u64m2x3_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg4ei16.c b/auto-generated/gnu-api-tests/vsuxseg4ei16.c index a6caa3bb2..497cd4610 100644 --- a/auto-generated/gnu-api-tests/vsuxseg4ei16.c +++ b/auto-generated/gnu-api-tests/vsuxseg4ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg4ei16_v_f16mf4x4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16mf4x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf4x4(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16mf4x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16mf2x4(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf2x4(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m1x4(float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m1x4(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m2x4(float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m2x4(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32mf2x4(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32mf2x4(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m1x4(float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m1x4(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m2x4(float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m2x4(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m1x4(float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m1x4(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m2x4(float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m2x4(base, 
bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m2x4(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf8x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf8x4(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf8x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf4x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf4x4(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf4x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf2x4(int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m1x4(int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m2x4(int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf4x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf4x4(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf4x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf2x4(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m1x4(int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m2x4(int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32mf2x4(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t 
bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m1x4(int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m2x4(int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i64m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i64m1x4(int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i64m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i64m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i64m2x4(int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i64m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf8x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf8x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf4x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf4x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8m1x4(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8m2x4(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16mf4x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16mf4x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return 
__riscv_vsuxseg4ei16_v_u16mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16m1x4(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16m2x4(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32mf2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32mf2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32m1x4(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32m2x4(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u64m1x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u64m1x4(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u64m1x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_u64m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u64m2x4(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_u64m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return 
__riscv_vsuxseg4ei16_v_f16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg4ei16_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_i64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_i64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u8m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u8m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_u64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei16\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg4ei32.c b/auto-generated/gnu-api-tests/vsuxseg4ei32.c
index 1b58d4b3e..39e16539c 100644
--- a/auto-generated/gnu-api-tests/vsuxseg4ei32.c
+++ b/auto-generated/gnu-api-tests/vsuxseg4ei32.c
@@ -6,300 +6,300 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsuxseg4ei32_v_f16mf4x4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16mf4x4(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f16mf2x4(float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16mf2x4(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f16m1x4(float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16m1x4(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f16m2x4(float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16m2x4(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f32mf2x4(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f32mf2x4(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f32m1x4(float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f32m1x4(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f32m2x4(float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f32m2x4(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f64m1x4(float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f64m1x4(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f64m2x4(float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f64m2x4(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8mf8x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8mf8x4(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8mf8x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8mf4x4(int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8mf2x4(int8_t *rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8m1x4(int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8m2x4(int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16mf4x4(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16mf2x4(int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16m1x4(int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16m2x4(int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i32mf2x4(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i32m1x4(int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i32m2x4(int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i64m1x4(int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i64m2x4(int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8mf8x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8mf8x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8m1x4(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8m2x4(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16m1x4(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16m2x4(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u32m1x4(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u32m2x4(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u64m1x4(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u64m2x4(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_f64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i8m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i8m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_i64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u8m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u8m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei32_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei32_v_u64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei32\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg4ei64.c b/auto-generated/gnu-api-tests/vsuxseg4ei64.c
index 53253d3e9..4f6931914 100644
--- a/auto-generated/gnu-api-tests/vsuxseg4ei64.c
+++ b/auto-generated/gnu-api-tests/vsuxseg4ei64.c
@@ -6,284 +6,284 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsuxseg4ei64_v_f16mf4x4(float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16mf4x4(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f16mf2x4(float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16mf2x4(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f16m1x4(float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16m1x4(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f16m2x4(float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16m2x4(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f32mf2x4(float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32mf2x4(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f32m1x4(float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32m1x4(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f32m2x4(float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32m2x4(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f64m1x4(float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f64m1x4(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f64m2x4(float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f64m2x4(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8mf8x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf8x4(int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8mf8x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf4x4(int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf2x4(int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8m1x4(int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16mf4x4(int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16mf2x4(int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16m1x4(int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16m2x4(int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32mf2x4(int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32m1x4(int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32m2x4(int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i64m1x4(int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i64m2x4(int64_t *rs1, vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf8x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf8x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8m1x4(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16m1x4(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16m2x4(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32m1x4(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32m2x4(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m1x4(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m2x4(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_f64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u64m1x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u64m1x4_m(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u64m2x4_m(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei64\.[ivxfswum.]+\s+} 70 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg4ei8.c b/auto-generated/gnu-api-tests/vsuxseg4ei8.c
index a63cab1ab..3459a250c 100644
--- a/auto-generated/gnu-api-tests/vsuxseg4ei8.c
+++ b/auto-generated/gnu-api-tests/vsuxseg4ei8.c
@@ -6,300 +6,300 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsuxseg4ei8_v_f16mf4x4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16mf4x4(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f16mf2x4(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16mf2x4(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f16m1x4(float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16m1x4(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f16m2x4(float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16m2x4(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f32mf2x4(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32mf2x4(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f32m1x4(float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32m1x4(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f32m2x4(float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32m2x4(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f64m1x4(float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f64m1x4(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_f64m2x4(float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f64m2x4(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8mf8x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf8x4(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8mf8x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf4x4(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf2x4(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8m1x4(int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8m2x4(int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16mf4x4(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16mf2x4(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16m1x4(int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16m2x4(int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32mf2x4(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32m1x4(int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32m2x4(int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i64m1x4(int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i64m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i64m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i64m2x4(int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i64m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u8mf8x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u8mf8x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u8mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u8mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u8mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u8mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u8m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8m1x4(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u8m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u8m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8m2x4(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u8m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u16mf4x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u16mf4x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u16mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u16mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u16m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16m1x4(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u16m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u16m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16m2x4(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u16m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u32mf2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u32mf2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u32m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32m1x4(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u32m1x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u32m2x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32m2x4(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u32m2x4(rs1, vs2, vs3, vl);
 }
 
-void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u64m1x4(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u64m1x4(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u64m1x4(rs1, vs2,
vs3, vl); } -void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u64m2x4(base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u64m2x4(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u64m2x4(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg4ei8_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_f64m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8mf8x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) { + return 
__riscv_vsuxseg4ei8_v_i16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_i64m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf8x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg4ei8_v_u8m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16mf4x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u32mf2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u32m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u32m2x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u64m1x4_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei8_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t 
vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei8_v_u64m2x4_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg5ei16.c b/auto-generated/gnu-api-tests/vsuxseg5ei16.c index dac8b0e1a..e7246f318 100644 --- a/auto-generated/gnu-api-tests/vsuxseg5ei16.c +++ b/auto-generated/gnu-api-tests/vsuxseg5ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg5ei16_v_f16mf4x5(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f16mf4x5(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f16mf2x5(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f16mf2x5(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f16m1x5(float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f16m1x5(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f32mf2x5(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f32mf2x5(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f32m1x5(float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f32m1x5(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f64m1x5(float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f64m1x5(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8mf8x5(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8mf4x5(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8mf2x5(int8_t *rs1, vuint16m1_t vs2, 
vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8m1x5(int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i16mf4x5(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i16mf2x5(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i16m1x5(int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i32mf2x5(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i32m1x5(int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i64m1x5(int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg5ei16_v_u8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8m1x5(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u16m1x5(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u32m1x5(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u64m1x5(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - 
return __riscv_vsuxseg5ei16_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_f64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t 
vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_i64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf2x5_m(vm, 
rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei16_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16_v_u64m1x5_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg5ei32.c b/auto-generated/gnu-api-tests/vsuxseg5ei32.c index 524e904b3..40b9526bc 100644 --- a/auto-generated/gnu-api-tests/vsuxseg5ei32.c +++ b/auto-generated/gnu-api-tests/vsuxseg5ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg5ei32_v_f16mf4x5(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f16mf4x5(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f16mf2x5(float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f16mf2x5(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f16m1x5(float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f16m1x5(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f32mf2x5(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f32mf2x5(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f32m1x5(float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg5ei32_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f32m1x5(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f64m1x5(float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f64m1x5(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8mf8x5(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8mf4x5(int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8mf2x5(int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8m1x5(int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i16mf4x5(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i16mf2x5(int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i16m1x5(int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i32mf2x5(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i32m1x5(int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i32m1x5(rs1, vs2, vs3, vl); } -void 
test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i64m1x5(int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8m1x5(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u16m1x5(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u32m1x5(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u64m1x5(base, bindex, v_tuple, vl); +void 
test_vsuxseg5ei32_v_u64m1x5(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_f64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) 
{ + return __riscv_vsuxseg5ei32_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_i64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t 
*base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei32_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei32_v_u64m1x5_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg5ei64.c b/auto-generated/gnu-api-tests/vsuxseg5ei64.c index ac6e5c559..c302374a3 100644 --- a/auto-generated/gnu-api-tests/vsuxseg5ei64.c +++ b/auto-generated/gnu-api-tests/vsuxseg5ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg5ei64_v_f16mf4x5(float16_t *base, 
vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf4x5(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16mf2x5(float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf2x5(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16m1x5(float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16m1x5(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32mf2x5(float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32mf2x5(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32m1x5(float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32m1x5(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f64m1x5(float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f64m1x5(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf8x5(int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf4x5(int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf2x5(int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8m1x5(int8_t *rs1, vuint64m8_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf4x5(int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t 
vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf2x5(int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16m1x5(int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32mf2x5(int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32m1x5(int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i64m1x5(int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8m1x5(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg5ei64_v_u16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16m1x5(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32m1x5(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u64m1x5(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f32m1x5_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_f64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg5ei64_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_i64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t vm, 
uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64_v_u64m1x5_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg5ei8.c b/auto-generated/gnu-api-tests/vsuxseg5ei8.c index 94e8cb97b..f11d7ff8c 100644 --- a/auto-generated/gnu-api-tests/vsuxseg5ei8.c +++ b/auto-generated/gnu-api-tests/vsuxseg5ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg5ei8_v_f16mf4x5(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf4x5(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16mf2x5(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf2x5(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16m1x5(float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16m1x5(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32mf2x5(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32mf2x5(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32m1x5(float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32m1x5(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f64m1x5(float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f64m1x5(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return 
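A matching sketch for the masked form exercised later in this file: vm moves to the front of the argument list, replacing the old mask parameter (wrapper name hypothetical, signature copied from the hunk):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Masked unordered indexed segment store: the fields of element i
 * are written only when bit i of vm is set. */
static void store_seg5_i8_masked(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2,
                                 vint8m1x5_t vs3, size_t vl) {
  __riscv_vsuxseg5ei8_v_i8m1x5_m(vm, rs1, vs2, vs3, vl);
}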
__riscv_vsuxseg5ei8_v_i8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf8x5(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf4x5(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf2x5(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8m1x5(int8_t *rs1, vuint8m1_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf4x5(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf2x5(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16m1x5(int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32mf2x5(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32m1x5(int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i64m1x5(int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf8x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf8x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, 
vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8m1x5(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf4x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf4x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16m1x5(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32mf2x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32mf2x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32m1x5(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u64m1x5(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u64m1x5(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u64m1x5(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t vm, 
float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_f64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_i64m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf8x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf4x5_m(mask, base, 
bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf4x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32mf2x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32m1x5_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8_v_u64m1x5_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg6ei16.c b/auto-generated/gnu-api-tests/vsuxseg6ei16.c index 6702349ec..cd8c5c7a8 100644 --- a/auto-generated/gnu-api-tests/vsuxseg6ei16.c +++ b/auto-generated/gnu-api-tests/vsuxseg6ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg6ei16_v_f16mf4x6(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf4x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf4x6(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf4x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16mf2x6(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf2x6(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16m1x6(float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16m1x6(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, 
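To show where the renamed operands typically come from, a strip-mined round trip assembled from standard RVV intrinsics (assumed available alongside the store under test; function and buffer names are hypothetical, and idx holds byte offsets, since indexed stores address memory in bytes):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Load 6-field u16 segments from src, then scatter each segment to
 * dst at the byte offset given by idx[i]. */
static void copy_seg6_u16(uint16_t *dst, const uint16_t *src,
                          const uint16_t *idx, size_t n) {
  for (size_t done = 0; done < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - done);
    /* vs2: byte offsets; vs3: 6 contiguous fields per element */
    vuint16m1_t vs2 = __riscv_vle16_v_u16m1(idx + done, vl);
    vuint16m1x6_t vs3 = __riscv_vlseg6e16_v_u16m1x6(src + 6 * done, vl);
    __riscv_vsuxseg6ei16_v_u16m1x6(dst, vs2, vs3, vl);
    done += vl;
  }
}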
size_t vl) { + return __riscv_vsuxseg6ei16_v_f16m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32mf2x6(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32mf2x6(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32m1x6(float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32m1x6(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f64m1x6(float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f64m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f64m1x6(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f64m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf8x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf8x6(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf8x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf4x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf4x6(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf4x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf2x6(int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8m1x6(int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf4x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16mf4x6(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf4x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16mf2x6(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16m1x6(int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg6ei16_v_i32mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i32mf2x6(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i32m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i32m1x6(int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i64m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i64m1x6(int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i64m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf8x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf8x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf4x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf4x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8m1x6(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf4x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf4x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16m1x6(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32mf2x6(rs1, 
vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u32m1x6(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u64m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u64m1x6(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u64m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_f64m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg6ei16_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_i64m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, 
vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32m1x6_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16_v_u64m1x6_m(vm, rs1, 
vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg6ei32.c b/auto-generated/gnu-api-tests/vsuxseg6ei32.c index 047abb3dc..6dafcb426 100644 --- a/auto-generated/gnu-api-tests/vsuxseg6ei32.c +++ b/auto-generated/gnu-api-tests/vsuxseg6ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg6ei32_v_f16mf4x6(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_f16mf4x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f16mf4x6(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_f16mf4x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f16mf2x6(float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_f16mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f16mf2x6(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_f16mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f16m1x6(float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_f16m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f16m1x6(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_f16m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f32mf2x6(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_f32mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f32mf2x6(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_f32mf2x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f32m1x6(float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_f32m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f32m1x6(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_f32m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f64m1x6(float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_f64m1x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f64m1x6(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_f64m1x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_i8mf8x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8mf8x6(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_i8mf8x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_i8mf4x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8mf4x6(int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_i8mf4x6(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32_v_i8mf2x6(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8mf2x6(int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32_v_i8mf2x6(rs1, vs2, vs3, vl); } -void 
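The index/data type pairings in these signatures follow the EMUL rule: index EMUL = (index EEW / data SEW) * data LMUL. For e16 data at mf4 with 32-bit indexes that gives (32/16) * 1/4 = mf2, hence vuint32mf2_t next to vfloat16mf4x6_t. A sketch with a hypothetical wrapper name (compiling _Float16 vector code assumes an extension such as zvfh):

#include <riscv_vector.h>
#include <stddef.h>

/* The vuint32mf2_t index type is forced by the f16/mf4 data type. */
static void store_seg6_f16(_Float16 *rs1, vuint32mf2_t vs2,
                           vfloat16mf4x6_t vs3, size_t vl) {
  __riscv_vsuxseg6ei32_v_f16mf4x6(rs1, vs2, vs3, vl);
}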
-  return __riscv_vsuxseg6ei32_v_i8m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i8m1x6(int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i8m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i16mf4x6(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i16mf2x6(int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i16m1x6(int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i32mf2x6(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i32m1x6(int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i64m1x6(int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8mf8x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8mf8x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8m1x6(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u16m1x6(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u32m1x6(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u64m1x6(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_f16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_f16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_f32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_f64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_i64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_i64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei32_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei32_v_u64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg6ei64.c b/auto-generated/gnu-api-tests/vsuxseg6ei64.c
index 8781007eb..76944b26d 100644
--- a/auto-generated/gnu-api-tests/vsuxseg6ei64.c
+++ b/auto-generated/gnu-api-tests/vsuxseg6ei64.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg6ei64_v_f16mf4x6(float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16mf4x6(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f16mf2x6(float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16mf2x6(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f16m1x6(float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16m1x6(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f32mf2x6(float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f32mf2x6(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f32m1x6(float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f32m1x6(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f64m1x6(float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f64m1x6(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8mf8x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8mf8x6(int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8mf8x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8mf4x6(int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8mf2x6(int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8m1x6(int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i16mf4x6(int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i16mf2x6(int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i16m1x6(int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i32mf2x6(int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i32m1x6(int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i64m1x6(int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8mf8x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8mf8x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8m1x6(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u16m1x6(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u32m1x6(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u64m1x6(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_f64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_i64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_i64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei64_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei64_v_u64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg6ei8.c b/auto-generated/gnu-api-tests/vsuxseg6ei8.c
index 523af8fb4..70d6854bc 100644
--- a/auto-generated/gnu-api-tests/vsuxseg6ei8.c
+++ b/auto-generated/gnu-api-tests/vsuxseg6ei8.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg6ei8_v_f16mf4x6(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16mf4x6(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f16mf2x6(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16mf2x6(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f16m1x6(float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16m1x6(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f32mf2x6(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f32mf2x6(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f32m1x6(float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f32m1x6(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f64m1x6(float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f64m1x6(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8mf8x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8mf8x6(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8mf8x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8mf4x6(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8mf2x6(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8m1x6(int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i16mf4x6(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i16mf2x6(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i16m1x6(int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i32mf2x6(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i32m1x6(int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i64m1x6(int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8mf8x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8mf8x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8m1x6(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u16mf4x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u16mf4x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u16mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u16mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u16m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u16m1x6(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u16m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u32mf2x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u32mf2x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u32m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u32m1x6(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u32m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u64m1x6(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u64m1x6(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u64m1x6(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_f64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_i64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_i64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8mf8x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u8m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u8m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u16mf4x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u16mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u16m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u16m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u32mf2x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u32m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u32m1x6_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg6ei8_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei8_v_u64m1x6_m(vm, rs1, vs2, vs3, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg7ei16.c b/auto-generated/gnu-api-tests/vsuxseg7ei16.c
index b4e512d85..c6558fee0 100644
--- a/auto-generated/gnu-api-tests/vsuxseg7ei16.c
+++ b/auto-generated/gnu-api-tests/vsuxseg7ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg7ei16_v_f16mf4x7(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_f16mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f16mf4x7(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_f16mf4x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f16mf2x7(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_f16mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f16mf2x7(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_f16mf2x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f16m1x7(float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_f16m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f16m1x7(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_f16m1x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f32mf2x7(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_f32mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f32mf2x7(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_f32mf2x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f32m1x7(float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_f32m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f32m1x7(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_f32m1x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f64m1x7(float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_f64m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f64m1x7(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_f64m1x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i8mf8x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i8mf8x7(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i8mf8x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i8mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i8mf4x7(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i8mf4x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i8mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i8mf2x7(int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i8mf2x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i8m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i8m1x7(int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i8m1x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i16mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i16mf4x7(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i16mf4x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i16mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i16mf2x7(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i16mf2x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i16m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i16m1x7(int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i16m1x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i32mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i32mf2x7(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i32mf2x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i32m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i32m1x7(int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i32m1x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_i64m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_i64m1x7(int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_i64m1x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_u8mf8x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_u8mf8x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_u8mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_u8mf4x7(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_u8mf2x7(base, bindex, v_tuple, vl);
__riscv_vsuxseg7ei16_v_u8mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8m1x7(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16m1x7(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32m1x7(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u64m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u64m1x7(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u64m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_f16m1x7_m(mask, base, bindex, 
v_tuple, vl); +void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_f16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_f32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_f64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t 
vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_i64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16_v_u64m1x7_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg7ei32.c b/auto-generated/gnu-api-tests/vsuxseg7ei32.c index ee4154224..2bc9e5a49 100644 --- a/auto-generated/gnu-api-tests/vsuxseg7ei32.c +++ b/auto-generated/gnu-api-tests/vsuxseg7ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg7ei32_v_f16mf4x7(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf4x7(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16mf2x7(float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf2x7(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16m1x7(float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16m1x7(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32mf2x7(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) 
{ - return __riscv_vsuxseg7ei32_v_f32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32mf2x7(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f32mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32m1x7(float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f32m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32m1x7(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f64m1x7(float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f64m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f64m1x7(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f64m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf8x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf8x7(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf8x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf4x7(int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf2x7(int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8m1x7(int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf4x7(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf2x7(int16_t *rs1, vuint32m1_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16m1x7(int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32mf2x7(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i32mf2x7(rs1, vs2, 
vs3, vl); } -void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i32m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32m1x7(int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i64m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i64m1x7(int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i64m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf8x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf8x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8m1x7(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16m1x7(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u32mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u32m1x7(base, bindex, v_tuple, vl); +void 
test_vsuxseg7ei32_v_u32m1x7(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u64m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u64m1x7(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u64m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_f64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) { + return 
__riscv_vsuxseg7ei32_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_i64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, 
vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32_v_u64m1x7_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git 
a/auto-generated/gnu-api-tests/vsuxseg7ei64.c b/auto-generated/gnu-api-tests/vsuxseg7ei64.c index 9e9865663..76ef4b61c 100644 --- a/auto-generated/gnu-api-tests/vsuxseg7ei64.c +++ b/auto-generated/gnu-api-tests/vsuxseg7ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg7ei64_v_f16mf4x7(float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf4x7(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16mf2x7(float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf2x7(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16m1x7(float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16m1x7(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32mf2x7(float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f32mf2x7(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32m1x7(float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f32m1x7(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f64m1x7(float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f64m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f64m1x7(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f64m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf8x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf8x7(int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf8x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf4x7(int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf2x7(int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8m1x7(base, bindex, v_tuple, vl); +void 
test_vsuxseg7ei64_v_i8m1x7(int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf4x7(int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf2x7(int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16m1x7(int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i32mf2x7(int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i32m1x7(int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i64m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i64m1x7(int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i64m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf8x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf8x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8m1x7(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t 
v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16m1x7(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u32mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32m1x7(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u64m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u64m1x7(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u64m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, 
float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_f64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32mf2x7_m(mask, base, bindex, 
v_tuple, vl); +void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_i64m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) { 
+ return __riscv_vsuxseg7ei64_v_u16m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u32m1x7_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64_v_u64m1x7_m(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-api-tests/vsuxseg7ei8.c b/auto-generated/gnu-api-tests/vsuxseg7ei8.c index 2274dd9a8..a49a3656c 100644 --- a/auto-generated/gnu-api-tests/vsuxseg7ei8.c +++ b/auto-generated/gnu-api-tests/vsuxseg7ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg7ei8_v_f16mf4x7(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8_v_f16mf4x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f16mf4x7(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8_v_f16mf4x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f16mf2x7(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8_v_f16mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f16mf2x7(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8_v_f16mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f16m1x7(float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8_v_f16m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f16m1x7(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8_v_f16m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f32mf2x7(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8_v_f32mf2x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f32mf2x7(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8_v_f32mf2x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f32m1x7(float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8_v_f32m1x7(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f32m1x7(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8_v_f32m1x7(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f64m1x7(float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8_v_f64m1x7(base, 
bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f64m1x7(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f64m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf8x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf8x7(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf8x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf4x7(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf4x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf2x7(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf2x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8m1x7(int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16mf4x7(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16mf4x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16mf2x7(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16mf2x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16m1x7(int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i32mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i32mf2x7(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i32mf2x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i32m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i32m1x7(int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i32m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i64m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i64m1x7(int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i64m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf8x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf8x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf4x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf2x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8m1x7(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16mf4x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16mf4x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16mf2x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16m1x7(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u32mf2x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u32mf2x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u32m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u32m1x7(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u32m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u64m1x7(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u64m1x7(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u64m1x7(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f16m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f32m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f64m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i32m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i64m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf8x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf4x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16mf4x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u32mf2x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u32m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u32m1x7_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u64m1x7_m(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg8ei16.c b/auto-generated/gnu-api-tests/vsuxseg8ei16.c
index 92e401990..a0bf29700 100644
--- a/auto-generated/gnu-api-tests/vsuxseg8ei16.c
+++ b/auto-generated/gnu-api-tests/vsuxseg8ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg8ei16_v_f16mf4x8(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf4x8(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16mf2x8(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf2x8(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16m1x8(float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16m1x8(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32mf2x8(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32mf2x8(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32m1x8(float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32m1x8(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f64m1x8(float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f64m1x8(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf8x8(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf8x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf4x8(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf2x8(int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8m1x8(int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf4x8(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf2x8(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16m1x8(int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32mf2x8(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32m1x8(int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i64m1x8(int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf8x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8m1x8(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16m1x8(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32m1x8(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u64m1x8(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f32m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f64m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i32m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i64m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u16m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u32m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u64m1x8_m(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg8ei32.c b/auto-generated/gnu-api-tests/vsuxseg8ei32.c
index 457573437..84b0b8ce8 100644
--- a/auto-generated/gnu-api-tests/vsuxseg8ei32.c
+++ b/auto-generated/gnu-api-tests/vsuxseg8ei32.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg8ei32_v_f16mf4x8(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf4x8(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16mf2x8(float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf2x8(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16m1x8(float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16m1x8(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32mf2x8(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32mf2x8(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32m1x8(float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32m1x8(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f64m1x8(float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f64m1x8(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf8x8(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8mf8x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf4x8(int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf2x8(int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8m1x8(int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf4x8(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf2x8(int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16m1x8(int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32mf2x8(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32m1x8(int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i64m1x8(int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8mf8x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8m1x8(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16m1x8(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32m1x8(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u64m1x8(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f16m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f32m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_f64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_f64m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i8m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i16m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i32m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_i64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_i64m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u8m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u16m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u32m1x8_m(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32_v_u64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32_v_u64m1x8_m(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg8ei64.c b/auto-generated/gnu-api-tests/vsuxseg8ei64.c
index 8c8e88ac8..c6de1a448 100644
--- a/auto-generated/gnu-api-tests/vsuxseg8ei64.c
+++ b/auto-generated/gnu-api-tests/vsuxseg8ei64.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg8ei64_v_f16mf4x8(float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_f16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16mf4x8(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_f16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f16mf2x8(float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_f16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16mf2x8(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_f16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f16m1x8(float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_f16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16m1x8(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_f16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f32mf2x8(float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_f32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f32mf2x8(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_f32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f32m1x8(float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_f32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f32m1x8(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_f32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f64m1x8(float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_f64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f64m1x8(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_f64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf8x8(int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i8mf8x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf4x8(int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i8mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf2x8(int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i8mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8m1x8(int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i8m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf4x8(int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf2x8(int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16m1x8(int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32mf2x8(int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32m1x8(int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i32m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i64m1x8(int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i64m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8mf8x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8m1x8(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u16mf4x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u16mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16m1x8(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u16m1x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u32mf2x8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32m1x8(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u32m1x8(rs1, vs2, vs3, vl);
} -void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_u64m1x8(base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_u64m1x8(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_u64m1x8(rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_f32m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_f64m1x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return 
+void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i8m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i16m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i32m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i64m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u8m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u16m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u32m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u64m1x8_m(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vsuxseg8ei8.c b/auto-generated/gnu-api-tests/vsuxseg8ei8.c
index ef4e76e6d..6e85608f4 100644
--- a/auto-generated/gnu-api-tests/vsuxseg8ei8.c
+++ b/auto-generated/gnu-api-tests/vsuxseg8ei8.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg8ei8_v_f16mf4x8(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16mf4x8(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f16mf4x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f16mf2x8(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16mf2x8(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f16mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f16m1x8(float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16m1x8(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f16m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f32mf2x8(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f32mf2x8(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f32mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f32m1x8(float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f32m1x8(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f32m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f64m1x8(float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f64m1x8(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f64m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf8x8(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8mf8x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf4x8(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8mf4x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf2x8(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8m1x8(int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16mf4x8(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i16mf4x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16mf2x8(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i16mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16m1x8(int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i16m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i32mf2x8(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i32mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i32m1x8(int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i32m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i64m1x8(int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i64m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8mf8x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8mf8x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8mf4x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8m1x8(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u16mf4x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u16mf4x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u16mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u16mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u16m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16m1x8(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u16m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u32mf2x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u32mf2x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u32m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u32m1x8(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u32m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u64m1x8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u64m1x8(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u64m1x8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f16m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f32m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_f64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_f64m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8mf8x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i8m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i16m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i32m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_i64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_i64m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8mf8x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u8m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u8m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u16mf4x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u16mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u16m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u16m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u32mf2x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u32m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u32m1x8_m(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u64m1x8_m(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u64m1x8_m(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-api-tests/vwadd.c b/auto-generated/gnu-api-tests/vwadd.c
index a22da6792..02d9eeb4e 100644
--- a/auto-generated/gnu-api-tests/vwadd.c
+++ b/auto-generated/gnu-api-tests/vwadd.c
@@ -6,484 +6,484 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16mf4(op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16mf4(vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16mf4(op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16mf4(vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16mf4(op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16mf4(vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16mf4(op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16mf4(vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16mf2(op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16mf2(vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16mf2(op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16mf2(vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16mf2(op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16mf2(vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16mf2(op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16mf2(vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m1(op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m1(vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m1(op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m1(vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m1(op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1(vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m1(vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m1(op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1(vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m1(vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m2(op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m2(vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m2(op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m2(vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m2(op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2(vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m2(vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m2(op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2(vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m2(vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m4(op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m4(vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m4(op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m4(vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m4(op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4(vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m4(vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m4(op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4(vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m4(vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m8(op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m8(vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m8(op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m8(vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m8(op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8(vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m8(vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m8(op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8(vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m8(vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32mf2(op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32mf2(vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32mf2(op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32mf2(vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32mf2(op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32mf2(vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32mf2(op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32mf2(vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m1(op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m1(vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m1(op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m1(vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m1(op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1(vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m1(vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m1(op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1(vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m1(vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m2(op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m2(vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m2(op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m2(vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m2(op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2(vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m2(vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m2(op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2(vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m2(vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m4(op1, op2, vl);
+vint32m4_t test_vwadd_vv_i32m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m4(vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m4(op1, op2, vl);
+vint32m4_t test_vwadd_vx_i32m4(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m4(vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m4(op1, op2, vl);
+vint32m4_t test_vwadd_wv_i32m4(vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m4(vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m4(op1, op2, vl);
+vint32m4_t test_vwadd_wx_i32m4(vint32m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m4(vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m8(op1, op2, vl);
+vint32m8_t test_vwadd_vv_i32m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m8(vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m8(op1, op2, vl);
+vint32m8_t test_vwadd_vx_i32m8(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m8(vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m8(op1, op2, vl);
+vint32m8_t test_vwadd_wv_i32m8(vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m8(vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m8(op1, op2, vl);
+vint32m8_t test_vwadd_wx_i32m8(vint32m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m8(vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m1(op1, op2, vl);
+vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m1(vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m1(op1, op2, vl);
+vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m1(vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m1(op1, op2, vl);
+vint64m1_t test_vwadd_wv_i64m1(vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m1(vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m1(op1, op2, vl);
+vint64m1_t test_vwadd_wx_i64m1(vint64m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m1(vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m2(op1, op2, vl);
+vint64m2_t test_vwadd_vv_i64m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m2(vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m2(op1, op2, vl);
+vint64m2_t test_vwadd_vx_i64m2(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m2(vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m2(op1, op2, vl);
+vint64m2_t test_vwadd_wv_i64m2(vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m2(vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m2(op1, op2, vl);
+vint64m2_t test_vwadd_wx_i64m2(vint64m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m2(vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m4(op1, op2, vl);
+vint64m4_t test_vwadd_vv_i64m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m4(vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m4(op1, op2, vl);
+vint64m4_t test_vwadd_vx_i64m4(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m4(vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m4(op1, op2, vl);
+vint64m4_t test_vwadd_wv_i64m4(vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m4(vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m4(op1, op2, vl);
+vint64m4_t test_vwadd_wx_i64m4(vint64m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m4(vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m8(op1, op2, vl);
+vint64m8_t test_vwadd_vv_i64m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m8(vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m8(op1, op2, vl);
+vint64m8_t test_vwadd_vx_i64m8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m8(vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m8(op1, op2, vl);
+vint64m8_t test_vwadd_wv_i64m8(vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m8(vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m8(op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8(vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m8(vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16mf4_m(vm, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16mf4_m(vm, vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16mf4_m(vm, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16mf4_m(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16mf2_m(vm, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16mf2_m(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16mf2_m(vm, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16mf2_m(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m1_m(vm, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m1_m(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m1_m(vm, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m1_m(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m2_m(vm, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m2_m(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m2_m(vm, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m2_m(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m4_m(vm, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m4_m(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m4_m(vm, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m4_m(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i16m8_m(vm, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i16m8_m(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i16m8_m(vm, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i16m8_m(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32mf2_m(vm, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32mf2_m(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32mf2_m(vm, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32mf2_m(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m1_m(vm, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m1_m(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m1_m(vm, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m1_m(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m2_m(vm, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m2_m(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m2_m(vm, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m2_m(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m4_m(vm, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m4_m(vm, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_i64m8_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_i64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vwaddu.c b/auto-generated/gnu-api-tests/vwaddu.c
index f94ef875e..af38b23d6 100644
--- a/auto-generated/gnu-api-tests/vwaddu.c
+++ b/auto-generated/gnu-api-tests/vwaddu.c
@@ -6,484 +6,484 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf4(vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf4(vs2, rs1, vl);
 }
-vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf4(vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf4(vs2, rs1, vl);
 }

-vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf2(vs2, vs1, vl);
 }

-vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf2(vs2, rs1, vl);
 }

-vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf2(vs2, vs1, vl);
 }

-vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf2(vs2, rs1, vl);
 }

-vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m1(op1, op2, vl);
+vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m1(vs2, vs1, vl);
 }

-vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m1(op1, op2, vl);
+vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m1(vs2, rs1, vl);
 }

-vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m1(op1, op2, vl);
+vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m1(vs2, vs1, vl);
 }

-vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m1(op1, op2, vl);
+vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m1(vs2, rs1, vl);
 }

-vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m2(op1, op2, vl);
+vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m2(vs2, vs1, vl);
 }

-vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m2(op1, op2, vl);
+vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m2(vs2, rs1, vl);
 }

-vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m2(op1, op2, vl);
+vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m2(vs2, vs1, vl);
 }

-vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m2(op1, op2, vl);
+vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m2(vs2, rs1, vl);
 }

-vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m4(op1, op2, vl);
+vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m4(vs2, vs1, vl);
 }

-vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m4(op1, op2, vl);
+vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m4(vs2, rs1, vl);
 }

-vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m4(op1, op2, vl);
+vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m4(vs2, vs1, vl);
 }

-vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m4(op1, op2, vl);
+vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m4(vs2, rs1, vl);
 }

-vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m8(op1, op2, vl);
+vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m8(vs2, vs1, vl);
 }

-vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m8(op1, op2, vl);
+vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m8(vs2, rs1, vl);
 }

-vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m8(op1, op2, vl);
+vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m8(vs2, vs1, vl);
 }

-vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m8(op1, op2, vl);
+vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m8(vs2, rs1, vl);
 }

-vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32mf2(vs2, vs1, vl);
 }

-vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32mf2(vs2, rs1, vl);
 }

-vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32mf2(vs2, vs1, vl);
 }

-vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32mf2(vs2, rs1, vl);
 }

-vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m1(op1, op2, vl);
+vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m1(vs2, vs1, vl);
 }

-vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m1(op1, op2, vl);
+vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m1(vs2, rs1, vl);
 }

-vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m1(op1, op2, vl);
+vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m1(vs2, vs1, vl);
 }

-vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m1(op1, op2, vl);
+vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m1(vs2, rs1, vl);
 }

-vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m2(op1, op2, vl);
+vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m2(vs2, vs1, vl);
 }

-vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m2(op1, op2, vl);
+vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m2(vs2, rs1, vl);
 }

-vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m2(op1, op2, vl);
+vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m2(vs2, vs1, vl);
 }

-vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m2(op1, op2, vl);
+vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m2(vs2, rs1, vl);
 }

-vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m4(op1, op2, vl);
+vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m4(vs2, vs1, vl);
 }

-vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m4(op1, op2, vl);
+vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m4(vs2, rs1, vl);
 }

-vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m4(op1, op2, vl);
+vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m4(vs2, vs1, vl);
 }

-vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m4(op1, op2, vl);
+vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m4(vs2, rs1, vl);
 }

-vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m8(op1, op2, vl);
+vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m8(vs2, vs1, vl);
 }

-vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m8(op1, op2, vl);
+vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m8(vs2, rs1, vl);
 }

-vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m8(op1, op2, vl);
+vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m8(vs2, vs1, vl);
 }

-vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m8(op1, op2, vl);
+vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m8(vs2, rs1, vl);
 }

-vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m1(op1, op2, vl);
+vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m1(vs2, vs1, vl);
 }

-vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m1(op1, op2, vl);
+vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m1(vs2, rs1, vl);
 }

-vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m1(op1, op2, vl);
+vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m1(vs2, vs1, vl);
 }

-vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m1(op1, op2, vl);
+vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m1(vs2, rs1, vl);
 }

-vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m2(op1, op2, vl);
+vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m2(vs2, vs1, vl);
 }

-vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m2(op1, op2, vl);
+vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m2(vs2, rs1, vl);
 }

-vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m2(op1, op2, vl);
+vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m2(vs2, vs1, vl);
 }

-vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m2(op1, op2, vl);
+vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m2(vs2, rs1, vl);
 }

-vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m4(op1, op2, vl);
+vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m4(vs2, vs1, vl);
 }

-vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m4(op1, op2, vl);
+vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m4(vs2, rs1, vl);
 }

-vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m4(op1, op2, vl);
+vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m4(vs2, vs1, vl);
 }

-vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m4(op1, op2, vl);
+vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m4(vs2, rs1, vl);
 }

-vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m8(op1, op2, vl);
+vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m8(vs2, vs1, vl);
 }

-vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m8(op1, op2, vl);
+vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m8(vs2, rs1, vl);
 }

-vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m8(op1, op2, vl);
+vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m8(vs2, vs1, vl);
 }

-vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m8(op1, op2, vl);
+vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m8(vs2, rs1, vl);
 }

-vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf4_m(vm, vs2, vs1, vl);
 }

-vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf4_m(vm, vs2, rs1, vl);
 }

-vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf4_m(vm, vs2, vs1, vl);
 }

-vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf4_m(vm, vs2, rs1, vl);
 }

-vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf2_m(vm, vs2, vs1, vl);
 }

-vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf2_m(vm, vs2, rs1, vl);
 }

-vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf2_m(vm, vs2, vs1, vl);
 }

-vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf2_m(vm, vs2, rs1, vl);
 }

-vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m1_m(vm, vs2, rs1, vl);
 }

-vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m1_m(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m1_m(vm, vs2, rs1, vl);
 }

-vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m2_m(vm, vs2, vs1, vl);
 }

-vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m2_m(vm, vs2, rs1, vl);
 }

-vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m2_m(vm, vs2, vs1, vl);
 }

-vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m2_m(vm, vs2, rs1, vl);
 }

-vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m4_m(vm, vs2, vs1, vl);
 }

-vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m4_m(vm, vs2, rs1, vl);
 }

-vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m4_m(vm, vs2, vs1, vl);
 }

-vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m4_m(vm, vs2, rs1, vl);
 }

-vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m8_m(vm, vs2, vs1, vl);
 }

-vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m8_m(vm, vs2, rs1, vl);
 }

-vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m8_m(vm, vs2, vs1, vl);
 }

-vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m8_m(vm, vs2, rs1, vl);
 }

-vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32mf2_m(vm, vs2, vs1, vl);
 }

-vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32mf2_m(vm, vs2, rs1, vl);
 }

-vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32mf2_m(vm, vs2, vs1, vl);
 }

-vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32mf2_m(vm, vs2, rs1, vl);
 }

-vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m1_m(vm, vs2, rs1, vl);
 }

-vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m1_m(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m1_m(vm, vs2, rs1, vl);
 }

-vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m2_m(vm, vs2, vs1, vl);
 }

-vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m2_m(vm, vs2, rs1, vl);
 }

-vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m2_m(vm, vs2, vs1, vl);
 }

-vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m2_m(vm, vs2, rs1, vl);
 }

-vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m4_m(vm, vs2, vs1, vl);
 }

-vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m4_m(vm, vs2, rs1, vl);
 }

-vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m4_m(vm, vs2, vs1, vl);
 }

-vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m4_m(vm, vs2, rs1, vl);
 }

-vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m8_m(vm, vs2, vs1, vl);
 }

-vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m8_m(vm, vs2, rs1, vl);
 }

-vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m8_m(vm, vs2, vs1, vl);
 }

-vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m8_m(vm, vs2, rs1, vl);
 }

-vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m1_m(vm, vs2, rs1, vl);
 }

-vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m1_m(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m1_m(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m2_m(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m2_m(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m2_m(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m2_m(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m4_m(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m4_m(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m4_m(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m4_m(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m8_m(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m8_m(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m8_m(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m8_m(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add[u]?\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-api-tests/vwcvt.c b/auto-generated/gnu-api-tests/vwcvt.c
index 777ecf746..ec2bbf9ab 100644
--- a/auto-generated/gnu-api-tests/vwcvt.c
+++ b/auto-generated/gnu-api-tests/vwcvt.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf4(src, vl);
+vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf4(vs2, vl);
 }

-vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf2(src, vl);
+vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf2(vs2, vl);
 }

-vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m1(src, vl);
+vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m1(vs2, vl);
 }

-vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m2(src, vl);
+vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m2(vs2, vl);
 }

-vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m4(src, vl);
+vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m4(vs2, vl);
 }

-vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m8(src, vl);
+vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m8(vs2, vl);
 }

-vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32mf2(src, vl);
+vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32mf2(vs2, vl);
 }

-vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m1(src, vl);
+vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m1(vs2, vl);
 }

-vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m2(src, vl);
+vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m2(vs2, vl);
 }

-vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m4(src, vl);
+vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m4(vs2, vl);
 }

-vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m8(src, vl);
+vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m8(vs2, vl);
 }

-vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m1(src, vl);
+vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m1(vs2, vl);
 }

-vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m2(src, vl);
+vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m2(vs2, vl);
 }

-vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m4(src, vl);
+vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m4(vs2, vl);
 }

-vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m8(src, vl);
+vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m8(vs2, vl);
 }

-vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf4_m(mask, src, vl);
+vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf4_m(vm, vs2, vl);
 }

-vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf2_m(mask, src, vl);
+vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf2_m(vm, vs2, vl);
 }

-vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m1_m(mask, src, vl);
+vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m1_m(vm, vs2, vl);
 }

-vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m2_m(mask, src, vl);
+vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t vm, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m2_m(vm, vs2, vl);
 }

-vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m4_m(mask, src, vl);
+vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t vm, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m4_m(vm, vs2, vl);
 }

-vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m8_m(mask, src, vl);
+vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t vm, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m8_m(vm, vs2, vl);
 }

-vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32mf2_m(mask, src, vl);
+vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32mf2_m(vm, vs2, vl);
 }

-vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m1_m(mask, src, vl);
+vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m1_m(vm, vs2, vl);
 }

-vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m2_m(mask, src, vl);
+vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m2_m(vm, vs2, vl);
 }

-vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m4_m(mask, src, vl);
+vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m4_m(vm, vs2, vl);
 }

-vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m8_m(mask, src, vl);
+vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t vm, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m8_m(vm, vs2, vl);
 }

-vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m1_m(mask, src, vl);
+vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m1_m(vm, vs2, vl);
 }

-vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m2_m(mask, src, vl);
+vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m2_m(vm, vs2, vl);
 }

-vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m4_m(mask, src, vl);
+vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m4_m(vm, vs2, vl);
 }

-vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m8_m(mask, src, vl);
+vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m8_m(vm, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvt\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vwcvtu.c b/auto-generated/gnu-api-tests/vwcvtu.c
index 11225e03c..056b40d5d 100644
--- a/auto-generated/gnu-api-tests/vwcvtu.c
+++ b/auto-generated/gnu-api-tests/vwcvtu.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16mf4(src, vl);
+vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16mf4(vs2, vl);
 }

-vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16mf2(src, vl);
+vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16mf2(vs2, vl);
 }

-vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m1(src, vl);
+vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m1(vs2, vl);
 }

-vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m2(src, vl);
+vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m2(vs2, vl);
 }

-vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m4(src, vl);
+vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m4(vs2, vl);
 }

-vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m8(src, vl);
+vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m8(vs2, vl);
 }

-vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32mf2(src, vl);
+vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32mf2(vs2, vl);
 }

-vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m1(src, vl);
+vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m1(vs2, vl);
 }

-vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m2(src, vl);
+vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m2(vs2, vl);
 }

-vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m4(src, vl);
+vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m4(vs2, vl);
 }

-vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m8(src, vl);
+vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m8(vs2, vl);
 }

-vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m1(src, vl);
+vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m1(vs2, vl);
 }

-vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m2(src, vl);
+vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m2(vs2, vl);
 }

-vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m4(src, vl);
+vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m4(vs2, vl);
 }

-vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m8(src, vl);
+vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m8(vs2, vl);
 }

-vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16mf4_m(mask, src, vl);
+vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16mf4_m(vm, vs2, vl);
 }

-vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16mf2_m(mask, src, vl);
+vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16mf2_m(vm, vs2, vl);
 }

-vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m1_m(mask, src, vl);
+vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m1_m(vm, vs2, vl);
 }

-vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m2_m(mask, src, vl);
+vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m2_m(vm, vs2, vl);
 }

-vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m4_m(mask, src, vl);
+vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m4_m(vm, vs2, vl);
 }

-vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u16m8_m(mask, src, vl);
+vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u16m8_m(vm, vs2, vl);
 }

-vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32mf2_m(mask, src, vl);
+vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32mf2_m(vm, vs2, vl);
 }

-vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m1_m(mask, src, vl);
+vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m1_m(vm, vs2, vl);
 }

-vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m2_m(mask, src, vl);
+vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m2_m(vm, vs2, vl);
 }

-vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m4_m(mask, src, vl);
+vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m4_m(vm, vs2, vl);
 }

-vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u32m8_m(mask, src, vl);
+vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u32m8_m(vm, vs2, vl);
 }

-vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m1_m(mask, src, vl);
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m1_m(vm, vs2, vl);
 }

-vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m2_m(mask, src, vl);
+vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m2_m(vm, vs2, vl);
 }

-vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m4_m(mask, src, vl);
+vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m4_m(vm, vs2, vl);
 }

-vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return __riscv_vwcvtu_x_x_v_u64m8_m(mask, src, vl);
+vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vwcvtu_x_x_v_u64m8_m(vm, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvtu\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vwmacc.c b/auto-generated/gnu-api-tests/vwmacc.c
index 6bf24ad26..f22dd796a 100644
--- a/auto-generated/gnu-api-tests/vwmacc.c
+++ b/auto-generated/gnu-api-tests/vwmacc.c
@@ -126,124 +126,124 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size
   return __riscv_vwmacc_vx_i64m8(vd, rs1, vs2, vl);
 }

-vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i16mf4_m(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i16mf4_m(vm, vd, vs1, vs2, vl);
 }

-vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i16mf4_m(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i16mf4_m(vm, vd, rs1, vs2, vl);
 }

-vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i16mf2_m(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i16mf2_m(vm, vd, vs1, vs2, vl);
 }

-vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i16mf2_m(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i16mf2_m(vm, vd, rs1, vs2, vl);
 }

-vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i16m1_m(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i16m1_m(vm, vd, vs1, vs2, vl);
 }

-vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i16m1_m(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i16m1_m(vm, vd, rs1, vs2, vl);
 }

-vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i16m2_m(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i16m2_m(vm, vd, vs1, vs2, vl);
 }

-vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i16m2_m(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i16m2_m(vm, vd, rs1, vs2, vl);
 }

-vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i16m4_m(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i16m4_m(vm, vd, vs1, vs2, vl);
 }

-vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i16m4_m(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i16m4_m(vm, vd, rs1, vs2, vl);
 }

-vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i16m8_m(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i16m8_m(vm, vd, vs1, vs2, vl);
 }

-vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i16m8_m(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i16m8_m(vm, vd, rs1, vs2, vl);
 }

-vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i32mf2_m(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i32mf2_m(vm, vd, vs1, vs2, vl);
 }

-vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i32mf2_m(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i32mf2_m(vm, vd, rs1, vs2, vl);
 }

-vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i32m1_m(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i32m1_m(vm, vd, vs1, vs2, vl);
 }

-vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i32m1_m(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i32m1_m(vm, vd, rs1, vs2, vl);
 }

-vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i32m2_m(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i32m2_m(vm, vd, vs1, vs2, vl);
 }

-vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i32m2_m(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i32m2_m(vm, vd, rs1, vs2, vl);
 }

-vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i32m4_m(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i32m4_m(vm, vd, vs1, vs2, vl);
 }

-vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i32m4_m(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i32m4_m(vm, vd, rs1, vs2, vl);
 }

-vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i32m8_m(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i32m8_m(vm, vd, vs1, vs2, vl);
 }

-vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i32m8_m(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i32m8_m(vm, vd, rs1, vs2, vl);
 }

-vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i64m1_m(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i64m1_m(vm, vd, vs1, vs2, vl);
 }

-vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i64m1_m(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i64m1_m(vm, vd, rs1, vs2, vl);
 }

-vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i64m2_m(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i64m2_m(vm, vd, vs1, vs2, vl);
 }

-vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i64m2_m(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i64m2_m(vm, vd, rs1, vs2, vl);
 }

-vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i64m4_m(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i64m4_m(vm, vd, vs1, vs2, vl);
 }

-vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i64m4_m(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i64m4_m(vm, vd, rs1, vs2, vl);
 }

-vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vv_i64m8_m(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vv_i64m8_m(vm, vd, vs1, vs2, vl);
 }

-vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vwmacc_vx_i64m8_m(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwmacc_vx_i64m8_m(vm, vd, rs1, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmacc\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vwmaccsu.c b/auto-generated/gnu-api-tests/vwmaccsu.c
index fee3d906e..921e87bac 100644
--- a/auto-generated/gnu-api-tests/vwmaccsu.c
+++ b/auto-generated/gnu-api-tests/vwmaccsu.c
@@ -126,124 +126,124 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, s
   return __riscv_vwmaccsu_vx_i64m8(vd, rs1, vs2, vl);
 }

-vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vwmaccsu_vv_i16mf4_m(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vwmaccsu_vv_i16mf4_m(vm, vd, vs1, vs2, vl);
 }

-vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vwmaccsu_vx_i16mf4_m(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf4_m(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16mf2_m(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16mf2_m(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16mf2_m(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf2_m(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m1_m(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m1_m(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m1_m(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m1_m(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m2_m(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m2_m(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m2_m(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m2_m(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m4_m(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m4_m(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m4_m(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m4_m(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m8_m(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m8_m(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m8_m(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t 
vl) { + return __riscv_vwmaccsu_vx_i16m8_m(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32mf2_m(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32mf2_m(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32mf2_m(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32mf2_m(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m1_m(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m1_m(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m1_m(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m1_m(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m2_m(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m2_m(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m2_m(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m2_m(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m4_m(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m4_m(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m4_m(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m4_m(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m8_m(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m8_m(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m8_m(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m8_m(vm, vd, rs1, vs2, 
vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m1_m(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m1_m(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m1_m(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m1_m(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m2_m(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m2_m(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m2_m(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m2_m(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m4_m(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m4_m(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m4_m(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m4_m(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m8_m(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m8_m(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m8_m(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m8_m(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccsu\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vwmaccu.c b/auto-generated/gnu-api-tests/vwmaccu.c index d1648644f..0e1206e60 100644 --- a/auto-generated/gnu-api-tests/vwmaccu.c +++ b/auto-generated/gnu-api-tests/vwmaccu.c @@ -126,124 +126,124 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, return __riscv_vwmaccu_vx_u64m8(vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf4_m(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t vm, 
vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf4_m(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16mf4_m(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf4_m(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf2_m(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf2_m(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16mf2_m(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf2_m(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m1_m(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m1_m(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m1_m(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m1_m(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m2_m(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m2_m(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m2_m(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m2_m(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m4_m(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m4_m(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m4_m(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m4_m(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m8_m(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return 
__riscv_vwmaccu_vv_u16m8_m(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m8_m(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m8_m(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32mf2_m(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32mf2_m(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32mf2_m(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32mf2_m(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m1_m(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m1_m(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m1_m(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m1_m(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m2_m(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m2_m(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m2_m(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m2_m(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m4_m(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m4_m(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m4_m(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m4_m(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m8_m(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m8_m(vm, vd, vs1, vs2, 
vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m8_m(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m8_m(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m1_m(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m1_m(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m1_m(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m1_m(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m2_m(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m2_m(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m2_m(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m2_m(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m4_m(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m4_m(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m4_m(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m4_m(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m8_m(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m8_m(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m8_m(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m8_m(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccu\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vwmaccus.c b/auto-generated/gnu-api-tests/vwmaccus.c index a167ddc5e..8284767bd 100644 --- a/auto-generated/gnu-api-tests/vwmaccus.c +++ b/auto-generated/gnu-api-tests/vwmaccus.c @@ -66,64 +66,64 @@ vint64m8_t 
test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, s return __riscv_vwmaccus_vx_i64m8(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf4_m(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf4_m(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf2_m(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf2_m(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m1_m(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m1_m(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m2_m(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m2_m(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m4_m(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m4_m(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m8_m(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m8_m(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32mf2_m(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32mf2_m(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m1_m(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m1_m(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m2_m(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m2_m(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m4_m(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m4_m(vm, vd, rs1, 
vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m8_m(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m8_m(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m1_m(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m1_m(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m2_m(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m2_m(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m4_m(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m4_m(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m8_m(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m8_m(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccus\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-api-tests/vwmul.c b/auto-generated/gnu-api-tests/vwmul.c index ac3e3b5ec..60a1ece1d 100644 --- a/auto-generated/gnu-api-tests/vwmul.c +++ b/auto-generated/gnu-api-tests/vwmul.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m1(op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { - return 
__riscv_vwmul_vx_i16m1(op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m2(op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m2(op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m4(op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m4(op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m8(op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m8(op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m1(op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m1(op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m2(op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m2(op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m4(op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m4(op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4(vint16m2_t vs2, int16_t 
rs1, size_t vl) { + return __riscv_vwmul_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m8(op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m8(op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m1(op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m1(op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m2(op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m2(op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m4(op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m4(op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m8(op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m8(op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m8(vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
__riscv_vwmul_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return 
__riscv_vwmul_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return 
__riscv_vwmul_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmul\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-api-tests/vwmulsu.c b/auto-generated/gnu-api-tests/vwmulsu.c index 93ef797c8..628250fbf 100644 --- a/auto-generated/gnu-api-tests/vwmulsu.c +++ b/auto-generated/gnu-api-tests/vwmulsu.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16m1(op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16m1(op1, op2, vl); +vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16m2(op1, op2, vl); +vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16m2(op1, op2, vl); +vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16m4(op1, op2, vl); +vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16m4(vs2, 
vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16m4(op1, op2, vl); +vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16m8(op1, op2, vl); +vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16m8(op1, op2, vl); +vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i32m1(op1, op2, vl); +vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i32m1(op1, op2, vl); +vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i32m2(op1, op2, vl); +vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i32m2(op1, op2, vl); +vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i32m4(op1, op2, vl); +vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i32m4(op1, op2, vl); +vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i32m8(op1, op2, vl); +vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i32m8(op1, op2, vl); +vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i64m1(op1, op2, vl); +vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t vs2, 
vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i64m1(op1, op2, vl); +vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i64m2(op1, op2, vl); +vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i64m2(op1, op2, vl); +vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i64m4(op1, op2, vl); +vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i64m4(op1, op2, vl); +vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i64m8(op1, op2, vl); +vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i64m8(op1, op2, vl); +vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i64m8(vs2, rs1, vl); } -vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulsu_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vwmulsu_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m1_m(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m2_m(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m2_m(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m4_m(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m4_m(vm, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m8_m(vm, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m8_m(vm, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32mf2_m(vm, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32mf2_m(vm, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m1_m(vm, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m2_m(vm, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m2_m(vm, vs2, rs1, vl);
 }
 
-vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m4_m(vm, vs2, vs1, vl);
 }
 
-vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m4_m(vm, vs2, rs1, vl);
 }
 
-vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m8_m(vm, vs2, vs1, vl);
 }
 
-vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m8_m(vm, vs2, rs1, vl);
 }
 
-vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m1_m(vm, vs2, rs1, vl);
 }
 
-vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m2_m(vm, vs2, vs1, vl);
 }
 
-vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m2_m(vm, vs2, rs1, vl);
 }
 
-vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m4_m(vm, vs2, vs1, vl);
 }
 
-vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m4_m(vm, vs2, rs1, vl);
 }
 
-vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m8_m(vm, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m8_m(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulsu\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vwmulu.c b/auto-generated/gnu-api-tests/vwmulu.c
index 80487c65b..56d872c34 100644
--- a/auto-generated/gnu-api-tests/vwmulu.c
+++ b/auto-generated/gnu-api-tests/vwmulu.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16mf4(vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16mf4(vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16mf2(vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16mf2(vs2, rs1, vl);
 }
 
-vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m1(op1, op2, vl);
+vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m1(op1, op2, vl);
+vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m1(vs2, rs1, vl);
 }
 
-vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m2(op1, op2, vl);
+vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m2(vs2, vs1, vl);
 }
 
-vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m2(op1, op2, vl);
+vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m2(vs2, rs1, vl);
 }
 
-vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m4(op1, op2, vl);
+vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m4(vs2, vs1, vl);
 }
 
-vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m4(op1, op2, vl);
+vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m4(vs2, rs1, vl);
 }
 
-vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m8(op1, op2,
vl); +vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m4(op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, 
vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m8(vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t vm, vuint8m1_t 
vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return 
__riscv_vwmulu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m8_m(vm, vs2, rs1, vl); } /* { 
dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulu\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-api-tests/vwredsum.c b/auto-generated/gnu-api-tests/vwredsum.c
index fea5094a8..5806061ee 100644
--- a/auto-generated/gnu-api-tests/vwredsum.c
+++ b/auto-generated/gnu-api-tests/vwredsum.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8mf8_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8mf8_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8mf4_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8mf4_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8mf2_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8mf2_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m1_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m1_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m2_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m2_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m4_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m4_i16m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m8_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m8_i16m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16mf4_i32m1(vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16mf4_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16mf2_i32m1(vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16mf2_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m1_i32m1(vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m1_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m2_i32m1(vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m2_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m4_i32m1(vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m4_i32m1(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m8_i32m1(vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m8_i32m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32mf2_i64m1(vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32mf2_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m1_i64m1(vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m1_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m2_i64m1(vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m2_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m4_i64m1(vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m4_i64m1(vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m8_i64m1(vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m8_i64m1(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8mf8_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t vm, vint8mf8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8mf8_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8mf4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t vm, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8mf4_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8mf2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8mf2_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t vm, vint8m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m1_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t vm, vint8m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m2_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t vm, vint8m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m4_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t vm, vint8m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i8m8_i16m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16mf4_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t vm, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16mf4_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16mf2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16mf2_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t vm, vint16m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m1_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t vm, vint16m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m2_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t vm, vint16m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m4_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t vm, vint16m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i16m8_i32m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32mf2_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32mf2_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t vm, vint32m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m1_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t vm, vint32m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m2_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t vm, vint32m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m4_i64m1_m(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t vm, vint32m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsum_vs_i32m8_i64m1_m(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsum\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/gnu-api-tests/vwredsumu.c b/auto-generated/gnu-api-tests/vwredsumu.c
index f045e0ec0..f05eab580 100644
--- a/auto-generated/gnu-api-tests/vwredsumu.c
+++ b/auto-generated/gnu-api-tests/vwredsumu.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf8_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m1_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m2_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m4_u16m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m8_u16m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf4_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m1_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m2_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m4_u32m1(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m8_u32m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32mf2_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m1_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m2_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m4_u64m1(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m8_u64m1(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf8_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf4_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf2_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t vm, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m1_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t vm, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m2_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t vm, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m4_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t vm, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m8_u16m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf4_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf2_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t vm, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m1_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t vm, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m2_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t vm, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m4_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t vm, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m8_u32m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32mf2_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t vm, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m1_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t vm, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m2_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t vm, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m4_u64m1_m(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t vm, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m8_u64m1_m(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsumu\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/gnu-api-tests/vwsub.c b/auto-generated/gnu-api-tests/vwsub.c
index e08c0edd6..19e5e63f4 100644
--- a/auto-generated/gnu-api-tests/vwsub.c
+++ b/auto-generated/gnu-api-tests/vwsub.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf4(op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf4(vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf4(op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf4(vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf4(op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf4(vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf2(op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf2(op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf2(vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf2(op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf2(vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf2(op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf2(vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m1(op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m1(op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m1(vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m1(op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1(vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m1(vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m1(op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1(vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m1(vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m2(op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m2(op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m2(vs2, rs1, vl); } 
-vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m2(op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2(vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m2(vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m2(op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2(vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m2(vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m4(op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m4(op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m4(vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m4(op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4(vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m4(vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m4(op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4(vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m4(vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m8(op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m8(op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m8(vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m8(op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8(vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m8(vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m8(op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8(vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m8(vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32mf2(op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32mf2(op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32mf2(vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32mf2(op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32mf2(vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32mf2(op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32mf2(vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return 
__riscv_vwsub_vv_i32m1(op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m1(op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m1(vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m1(op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1(vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m1(vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m1(op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1(vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m1(vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m2(op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m2(op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m2(vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m2(op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2(vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m2(vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m2(op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2(vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m2(vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m4(op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m4(op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m4(vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m4(op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4(vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m4(vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m4(op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4(vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m4(vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m8(op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m8(op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m8(vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m8(op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8(vint32m8_t vs2, 
vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m8(vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m8(op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8(vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m8(vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m1(op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m1(op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m1(vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m1(op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1(vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m1(vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m1(op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1(vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m1(vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m2(op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m2(op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m2(vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m2(op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2(vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m2(vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m2(op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2(vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m2(vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m4(op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m4(op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m4(vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m4(op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4(vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m4(vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m4(op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4(vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m4(vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m8(op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m8(vs2, vs1, vl); } 
-vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m8(op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m8(vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m8(op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8(vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m8(vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m8(op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8(vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m8(vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf4_m(vm, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf4_m(mask, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf4_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf2_m(vm, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf2_m(vm, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf2_m(mask, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf2_m(vm, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return 
__riscv_vwsub_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m1_m(vm, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m1_m(vm, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m1_m(vm, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m2_m(vm, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m2_m(vm, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m2_m(vm, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m4_m(vm, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m4_m(vm, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m4_m(vm, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t 
test_vwsub_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m8_m(vm, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m8_m(vm, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m8_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32mf2_m(vm, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32mf2_m(vm, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32mf2_m(mask, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32mf2_m(vm, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m1_m(vm, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m1_m(vm, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m1_m(vm, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m2_m(mask, op1, 
op2, vl); +vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m2_m(vm, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m2_m(vm, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m2_m(vm, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m4_m(vm, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m4_m(vm, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m4_m(vm, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m8_m(vm, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m8_m(vm, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m8_m(vm, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t 
test_vwsub_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m1_m(vm, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m1_m(vm, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m1_m(vm, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m2_m(vm, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m2_m(vm, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m2_m(vm, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m4_m(vm, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m4_m(vm, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m4_m(vm, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t 
test_vwsub_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m8_m(vm, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m8_m(vm, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vwsubu.c b/auto-generated/gnu-api-tests/vwsubu.c index 407f05edf..d5bb85d1e 100644 --- a/auto-generated/gnu-api-tests/vwsubu.c +++ b/auto-generated/gnu-api-tests/vwsubu.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf4(vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf4(op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf4(vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf4(op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf4(vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf2(vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf2(op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf2(vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf2(op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf2(vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m1(op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m1(vs2, vs1, vl); } 
-vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m1(op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m1(vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m1(op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m1(vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m1(op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m1(vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m2(op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m2(op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m2(vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m2(op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m2(vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m2(op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m2(vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m4(op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m4(op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m4(vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m4(op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m4(vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m4(op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m4(vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m8(op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m8(vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m8(op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m8(vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m8(op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m8(vs2, vs1, 
vl); } -vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m8(op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m8(vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32mf2(vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32mf2(op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32mf2(vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32mf2(op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32mf2(vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m1(op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m1(op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m1(vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m1(op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m1(vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m1(op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m1(vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m2(op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m2(op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m2(vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m2(op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m2(vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m2(op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m2(vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m4(op1, op2, vl); +vuint32m4_t 
test_vwsubu_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m4(op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m4(vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m4(op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m4(vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m4(op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m4(vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m8(op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m8(op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m8(vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m8(op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m8(vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m8(op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m8(vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m1(op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m1(op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m1(vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m1(op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m1(vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m1(op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m1(vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m2(op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m2(op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m2(vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return 
__riscv_vwsubu_wv_u64m2(op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m2(vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m2(op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m2(vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m4(op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m4(op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m4(vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m4(op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m4(vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m4(op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m4(vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m8(op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m8(op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m8(vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m8(op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m8(vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m8(op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m8(vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf4_m(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vwsubu_wx_u16mf4_m(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf4_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf2_m(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf2_m(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf2_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m1_m(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m1_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m2_m(vm, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m2_m(vm, vs2, 
vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m2_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m4_m(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m4_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m8_m(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m8_m(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m8_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t vm, 
vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32mf2_m(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32mf2_m(mask, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32mf2_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m1_m(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m1_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m2_m(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m2_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, 
size_t vl) { - return __riscv_vwsubu_wv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m4_m(vm, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m4_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m8_m(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m8_m(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m8_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m1_m(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m1_m(vm, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m2_m(vm, 
vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m2_m(vm, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m2_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m4_m(vm, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m4_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m8_m(vm, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m8_m(vm, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m8_m(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub[u]?\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-api-tests/vxor.c b/auto-generated/gnu-api-tests/vxor.c index 6dc7b2ba7..70daa25de 100644 --- a/auto-generated/gnu-api-tests/vxor.c +++ b/auto-generated/gnu-api-tests/vxor.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t 
test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf8(op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf8(vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf8(op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf8(vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf4(op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf4(vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf4(op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf4(vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf2(op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf2(vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf2(op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf2(vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_i8m1(op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m1(vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m1(op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m1(vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_i8m2(op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m2(vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m2(op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m2(vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_i8m4(op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m4(vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m4(op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m4(vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_i8m8(op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m8(vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m8(op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m8(vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf4(op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf4(vs2, vs1, 
vl);
 }
-vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16mf4(op1, op2, vl);
+vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i16mf4(vs2, rs1, vl);
 }
-vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16mf2(op1, op2, vl);
+vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i16mf2(vs2, vs1, vl);
 }
-vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16mf2(op1, op2, vl);
+vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i16mf2(vs2, rs1, vl);
 }
-vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m1(op1, op2, vl);
+vint16m1_t test_vxor_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i16m1(vs2, vs1, vl);
 }
-vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m1(op1, op2, vl);
+vint16m1_t test_vxor_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i16m1(vs2, rs1, vl);
 }
-vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m2(op1, op2, vl);
+vint16m2_t test_vxor_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i16m2(vs2, vs1, vl);
 }
-vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m2(op1, op2, vl);
+vint16m2_t test_vxor_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i16m2(vs2, rs1, vl);
 }
-vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m4(op1, op2, vl);
+vint16m4_t test_vxor_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i16m4(vs2, vs1, vl);
 }
-vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m4(op1, op2, vl);
+vint16m4_t test_vxor_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i16m4(vs2, rs1, vl);
 }
-vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m8(op1, op2, vl);
+vint16m8_t test_vxor_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i16m8(vs2, vs1, vl);
 }
-vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m8(op1, op2, vl);
+vint16m8_t test_vxor_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i16m8(vs2, rs1, vl);
 }
-vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32mf2(op1, op2, vl);
+vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i32mf2(vs2, vs1, vl);
 }
-vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32mf2(op1, op2, vl);
+vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i32mf2(vs2, rs1, vl);
 }
-vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m1(op1, op2, vl);
+vint32m1_t test_vxor_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i32m1(vs2, vs1, vl);
 }
-vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m1(op1, op2, vl);
+vint32m1_t test_vxor_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i32m1(vs2, rs1, vl);
 }
-vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m2(op1, op2, vl);
+vint32m2_t test_vxor_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i32m2(vs2, vs1, vl);
 }
-vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m2(op1, op2, vl);
+vint32m2_t test_vxor_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i32m2(vs2, rs1, vl);
 }
-vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m4(op1, op2, vl);
+vint32m4_t test_vxor_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i32m4(vs2, vs1, vl);
 }
-vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m4(op1, op2, vl);
+vint32m4_t test_vxor_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i32m4(vs2, rs1, vl);
 }
-vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m8(op1, op2, vl);
+vint32m8_t test_vxor_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i32m8(vs2, vs1, vl);
 }
-vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m8(op1, op2, vl);
+vint32m8_t test_vxor_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i32m8(vs2, rs1, vl);
 }
-vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m1(op1, op2, vl);
+vint64m1_t test_vxor_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i64m1(vs2, vs1, vl);
 }
-vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m1(op1, op2, vl);
+vint64m1_t test_vxor_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i64m1(vs2, rs1, vl);
 }
-vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m2(op1, op2, vl);
+vint64m2_t test_vxor_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i64m2(vs2, vs1, vl);
 }
-vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m2(op1, op2, vl);
+vint64m2_t test_vxor_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i64m2(vs2, rs1, vl);
 }
-vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m4(op1, op2, vl);
+vint64m4_t test_vxor_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i64m4(vs2, vs1, vl);
 }
-vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m4(op1, op2, vl);
+vint64m4_t test_vxor_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i64m4(vs2, rs1, vl);
 }
-vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m8(op1, op2, vl);
+vint64m8_t test_vxor_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vxor_vv_i64m8(vs2, vs1, vl);
 }
-vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m8(op1, op2, vl);
+vint64m8_t test_vxor_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_vx_i64m8(vs2, rs1, vl);
 }
-vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vxor_vv_u8mf8(vs2, vs1, vl);
 }
-vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8mf8(op1, op2, vl);
+vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_vx_u8mf8(vs2, rs1, vl);
 }
-vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vxor_vv_u8mf4(vs2, vs1, vl);
 }
-vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8mf4(op1, op2, vl);
+vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_vx_u8mf4(vs2, rs1, vl);
 }
-vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8mf2(vs2, vs1, vl);
 }
-vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8mf2(op1, op2, vl);
+vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_vx_u8mf2(vs2, rs1, vl);
 }
-vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m1(op1, op2, vl);
+vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vxor_vv_u8m1(vs2, vs1, vl);
 }
-vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m1(op1, op2, vl);
+vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m1(vs2, rs1, vl);
 }
-vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m2(op1, op2, vl);
+vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vxor_vv_u8m2(vs2, vs1, vl);
 }
-vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m2(op1, op2, vl);
+vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m2(vs2, rs1, vl);
 }
-vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m4(op1, op2, vl);
+vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8m4(vs2, vs1, vl);
 }
-vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m4(op1, op2, vl);
+vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m4(vs2, rs1, vl);
 }
-vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m8(op1, op2, vl);
+vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8m8(vs2, vs1, vl);
 }
-vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m8(op1, op2, vl);
+vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m8(vs2, rs1, vl);
 }
-vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16mf4(vs2, vs1, vl);
 }
-vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16mf4(op1, op2, vl);
+vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16mf4(vs2, rs1, vl);
 }
-vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16mf2(vs2, vs1, vl);
 }
-vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16mf2(op1, op2, vl);
+vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16mf2(vs2, rs1, vl);
 }
-vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m1(op1, op2, vl);
+vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m1(vs2, vs1, vl);
 }
-vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m1(op1, op2, vl);
+vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m1(vs2, rs1, vl);
 }
-vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m2(op1, op2, vl);
+vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m2(vs2, vs1, vl);
 }
-vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m2(op1, op2, vl);
+vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m2(vs2, rs1, vl);
 }
-vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m4(op1, op2, vl);
+vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m4(vs2, vs1, vl);
 }
-vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m4(op1, op2, vl);
+vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m4(vs2, rs1, vl);
 }
-vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m8(op1, op2, vl);
+vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m8(vs2, vs1, vl);
 }
-vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m8(op1, op2, vl);
+vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m8(vs2, rs1, vl);
 }
-vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32mf2(vs2, vs1, vl);
 }
-vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32mf2(op1, op2, vl);
+vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32mf2(vs2, rs1, vl);
 }
-vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m1(op1, op2, vl);
+vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m1(vs2, vs1, vl);
 }
-vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m1(op1, op2, vl);
+vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m1(vs2, rs1, vl);
 }
-vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m2(op1, op2, vl);
+vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m2(vs2, vs1, vl);
 }
-vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m2(op1, op2, vl);
+vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m2(vs2, rs1, vl);
 }
-vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m4(op1, op2, vl);
+vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m4(vs2, vs1, vl);
 }
-vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m4(op1, op2, vl);
+vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m4(vs2, rs1, vl);
 }
-vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m8(op1, op2, vl);
+vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m8(vs2, vs1, vl);
 }
-vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m8(op1, op2, vl);
+vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m8(vs2, rs1, vl);
 }
-vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m1(op1, op2, vl);
+vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m1(vs2, vs1, vl);
 }
-vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m1(op1, op2, vl);
+vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m1(vs2, rs1, vl);
 }
-vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m2(op1, op2, vl);
+vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m2(vs2, vs1, vl);
 }
-vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m2(op1, op2, vl);
+vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m2(vs2, rs1, vl);
 }
-vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m4(op1, op2, vl);
+vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m4(vs2, vs1, vl);
 }
-vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m4(op1, op2, vl);
+vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m4(vs2, rs1, vl);
 }
-vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m8(op1, op2, vl);
+vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m8(vs2, vs1, vl);
 }
-vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m8(op1, op2, vl);
+vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m8(vs2, rs1, vl);
 }
-vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
  return __riscv_vxor_vv_i8mf8_m(vm, vs2, vs1, vl);
 }
-vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_vx_i8mf8_m(mask, op1, op2, vl);
+vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vxor_vx_i8mf8_m(vm, vs2, rs1, vl);
 }
-vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
  return __riscv_vxor_vv_i8mf4_m(vm, vs2, vs1, vl);
 }
-vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_vx_i8mf4_m(mask, op1, op2, vl);
+vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vxor_vx_i8mf4_m(vm, vs2, rs1, vl);
 }
-vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_i8mf2_m(vm, vs2, vs1, vl);
 }
-vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_vx_i8mf2_m(mask, op1, op2, vl);
+vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vxor_vx_i8mf2_m(vm, vs2, rs1, vl);
 }
-vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vxor_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_i8m1_m(vm, vs2, vs1, vl);
 }
-vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_vx_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vxor_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vxor_vx_i8m1_m(vm, vs2, rs1, vl);
 }
-vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vxor_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_i8m2_m(vm, vs2, vs1, vl);
 }
-vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_vx_i8m2_m(mask, op1, op2, vl);
+vint8m2_t test_vxor_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vxor_vx_i8m2_m(vm, vs2, rs1, vl);
 }
-vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vxor_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_i8m4_m(vm, vs2, vs1, vl);
 }
-vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_vx_i8m4_m(mask, op1, op2, vl);
+vint8m4_t test_vxor_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vxor_vx_i8m4_m(vm, vs2, rs1, vl);
 }
-vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vxor_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_i8m8_m(vm, vs2, vs1, vl);
 }
-vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_vx_i8m8_m(mask, op1, op2, vl);
+vint8m8_t test_vxor_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vxor_vx_i8m8_m(vm, vs2, rs1, vl);
 }
-vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
  return __riscv_vxor_vv_i16mf4_m(vm, vs2, vs1, vl);
 }
-vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16mf4_m(mask, op1, op2, vl);
+vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vxor_vx_i16mf4_m(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_i16mf2_m(vm, vs2, vs1, vl);
 }
-vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16mf2_m(mask, op1, op2, vl);
+vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vxor_vx_i16mf2_m(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vxor_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_i16m1_m(vm, vs2, vs1, vl);
 }
-vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m1_m(mask, op1, op2, vl);
+vint16m1_t test_vxor_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vxor_vx_i16m1_m(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vxor_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_i16m2_m(vm, vs2, vs1, vl);
 }
-vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m2_m(mask, op1, op2, vl);
+vint16m2_t test_vxor_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vxor_vx_i16m2_m(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vxor_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_i16m4_m(vm, vs2, vs1, vl);
 }
-vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m4_m(mask, op1, op2, vl);
+vint16m4_t test_vxor_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vxor_vx_i16m4_m(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vxor_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_i16m8_m(vm, vs2, vs1, vl);
 }
-vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_vx_i16m8_m(mask, op1, op2, vl);
+vint16m8_t test_vxor_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vxor_vx_i16m8_m(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_i32mf2_m(vm, vs2, vs1, vl);
 }
-vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32mf2_m(mask, op1, op2, vl);
+vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vxor_vx_i32mf2_m(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vxor_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_i32m1_m(vm, vs2, vs1, vl);
 }
-vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m1_m(mask, op1, op2, vl);
+vint32m1_t test_vxor_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vxor_vx_i32m1_m(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vxor_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_i32m2_m(vm, vs2, vs1, vl);
 }
-vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m2_m(mask, op1, op2, vl);
+vint32m2_t test_vxor_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vxor_vx_i32m2_m(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vxor_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_i32m4_m(vm, vs2, vs1, vl);
 }
-vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m4_m(mask, op1, op2, vl);
+vint32m4_t test_vxor_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vxor_vx_i32m4_m(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vxor_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_i32m8_m(vm, vs2, vs1, vl);
 }
-vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_vx_i32m8_m(mask, op1, op2, vl);
+vint32m8_t test_vxor_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vxor_vx_i32m8_m(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vxor_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_i64m1_m(vm, vs2, vs1, vl);
 }
-vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m1_m(mask, op1, op2, vl);
+vint64m1_t test_vxor_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vxor_vx_i64m1_m(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vxor_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_i64m2_m(vm, vs2, vs1, vl);
 }
-vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m2_m(mask, op1, op2, vl);
+vint64m2_t test_vxor_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vxor_vx_i64m2_m(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vxor_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_i64m4_m(vm, vs2, vs1, vl);
 }
-vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m4_m(mask, op1, op2, vl);
+vint64m4_t test_vxor_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vxor_vx_i64m4_m(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vxor_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_i64m8_m(vm, vs2, vs1, vl);
 }
-vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_vx_i64m8_m(mask, op1, op2, vl);
+vint64m8_t test_vxor_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vxor_vx_i64m8_m(vm, vs2, rs1, vl);
 }
-vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8mf8_m(vm, vs2, vs1, vl);
 }
-vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8mf8_m(mask, op1, op2, vl);
+vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8mf8_m(vm, vs2, rs1, vl);
 }
-vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8mf4_m(vm, vs2, vs1, vl);
 }
-vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8mf4_m(mask, op1, op2, vl);
+vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8mf4_m(vm, vs2, rs1, vl);
 }
-vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8mf2_m(vm, vs2, vs1, vl);
 }
-vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8mf2_m(mask, op1, op2, vl);
+vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8mf2_m(vm, vs2, rs1, vl);
 }
-vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8m1_m(vm, vs2, vs1, vl);
 }
-vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m1_m(mask, op1, op2, vl);
+vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m1_m(vm, vs2, rs1, vl);
 }
-vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8m2_m(vm, vs2, vs1, vl);
 }
-vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m2_m(mask, op1, op2, vl);
+vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m2_m(vm, vs2, rs1, vl);
 }
-vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8m4_m(vm, vs2, vs1, vl);
 }
-vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m4_m(mask, op1, op2, vl);
+vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m4_m(vm, vs2, rs1, vl);
 }
-vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u8m8_m(vm, vs2, vs1, vl);
 }
-vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_vx_u8m8_m(mask, op1, op2, vl);
+vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_vxor_vx_u8m8_m(vm, vs2, rs1, vl);
 }
-vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16mf4_m(vm, vs2, vs1, vl);
 }
-vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16mf4_m(mask, op1, op2, vl);
+vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16mf4_m(vm, vs2, rs1, vl);
 }
-vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16mf2_m(vm, vs2, vs1, vl);
 }
-vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16mf2_m(mask, op1, op2, vl);
+vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16mf2_m(vm, vs2, rs1, vl);
 }
-vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m1_m(vm, vs2, vs1, vl);
 }
-vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m1_m(mask, op1, op2, vl);
+vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m1_m(vm, vs2, rs1, vl);
 }
-vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m2_m(vm, vs2, vs1, vl);
 }
-vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m2_m(mask, op1, op2, vl);
+vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m2_m(vm, vs2, rs1, vl);
 }
-vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m4_m(vm, vs2, vs1, vl);
 }
-vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m4_m(mask, op1, op2, vl);
+vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m4_m(vm, vs2, rs1, vl);
 }
-vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u16m8_m(vm, vs2, vs1, vl);
 }
-vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_vx_u16m8_m(mask, op1, op2, vl);
+vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vxor_vx_u16m8_m(vm, vs2, rs1, vl);
 }
-vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32mf2_m(vm, vs2, vs1, vl);
 }
-vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32mf2_m(mask, op1, op2, vl);
+vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32mf2_m(vm, vs2, rs1, vl);
 }
-vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m1_m(vm, vs2, vs1, vl);
 }
-vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m1_m(mask, op1, op2, vl);
+vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m1_m(vm, vs2, rs1, vl);
 }
-vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m2_m(vm, vs2, vs1, vl);
 }
-vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m2_m(mask, op1, op2, vl);
+vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m2_m(vm, vs2, rs1, vl);
 }
-vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m4_m(vm, vs2, vs1, vl);
 }
-vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m4_m(mask, op1, op2, vl);
+vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m4_m(vm, vs2, rs1, vl);
 }
-vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u32m8_m(vm, vs2, vs1, vl);
 }
-vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_vx_u32m8_m(mask, op1, op2, vl);
+vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vxor_vx_u32m8_m(vm, vs2, rs1, vl);
 }
-vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m1_m(vm, vs2, vs1, vl);
 }
-vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m1_m(mask, op1, op2, vl);
+vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m1_m(vm, vs2, rs1, vl);
 }
-vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m2_m(vm, vs2, vs1, vl);
 }
-vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m2_m(mask, op1, op2, vl);
+vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m2_m(vm, vs2, rs1, vl);
 }
-vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m4_m(vm, vs2, vs1, vl);
 }
-vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m4_m(mask, op1, op2, vl);
+vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m4_m(vm, vs2, rs1, vl);
 }
-vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vxor_vv_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_vxor_vv_u64m8_m(vm, vs2, vs1, vl);
 }
-vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_vx_u64m8_m(mask, op1, op2, vl);
+vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vxor_vx_u64m8_m(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vxor\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-api-tests/vzext_vf2.c b/auto-generated/gnu-api-tests/vzext_vf2.c
index 5aafdb334..b87919416 100644
--- a/auto-generated/gnu-api-tests/vzext_vf2.c
+++ b/auto-generated/gnu-api-tests/vzext_vf2.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16mf4(op1, vl);
+vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16mf4(vs2, vl);
 }
-vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16mf2(op1, vl);
+vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16mf2(vs2, vl);
 }
-vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m1(op1, vl);
+vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m1(vs2, vl);
 }
-vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m2(op1, vl);
+vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m2(vs2, vl);
 }
-vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m4(op1, vl);
+vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m4(vs2, vl);
 }
-vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m8(op1, vl);
+vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m8(vs2, vl);
 }
-vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32mf2(op1, vl);
+vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32mf2(vs2, vl);
 }
-vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m1(op1, vl);
+vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m1(vs2, vl);
 }
-vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m2(op1, vl);
+vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m2(vs2, vl);
 }
-vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m4(op1, vl);
+vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m4(vs2, vl);
 }
-vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m8(op1, vl);
+vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m8(vs2, vl);
 }
-vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m1(op1, vl);
+vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m1(vs2, vl);
 }
-vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m2(op1, vl);
+vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m2(vs2, vl);
 }
-vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m4(op1, vl);
+vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m4(vs2, vl);
 }
-vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m8(op1, vl);
+vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m8(vs2, vl);
 }
-vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16mf4_m(mask, op1, vl);
+vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16mf4_m(vm, vs2, vl);
 }
-vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16mf2_m(mask, op1, vl);
+vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16mf2_m(vm, vs2, vl);
 }
-vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m1_m(mask, op1, vl);
+vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m1_m(vm, vs2, vl);
 }
-vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m2_m(mask, op1, vl);
+vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m2_m(vm, vs2, vl);
 }
-vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m4_m(mask, op1, vl);
+vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m4_m(vm, vs2, vl);
 }
-vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u16m8_m(mask, op1, vl);
+vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u16m8_m(vm, vs2, vl);
 }
-vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32mf2_m(mask, op1, vl);
+vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32mf2_m(vm, vs2, vl);
 }
-vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m1_m(mask, op1, vl);
+vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m1_m(vm, vs2, vl);
 }
-vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m2_m(mask, op1, vl);
+vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m2_m(vm, vs2, vl);
 }
-vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m4_m(mask, op1, vl);
+vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m4_m(vm, vs2, vl);
 }
-vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint16m4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u32m8_m(mask, op1, vl);
+vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u32m8_m(vm, vs2, vl);
 }
-vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m1_m(mask, op1, vl);
+vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m1_m(vm, vs2, vl);
 }
-vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m2_m(mask, op1, vl);
+vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m2_m(vm, vs2, vl);
 }
-vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m4_m(mask, op1, vl);
+vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m4_m(vm, vs2, vl);
 }
-vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint32m4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m8_m(mask, op1, vl);
+vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
  return __riscv_vzext_vf2_u64m8_m(vm, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf2[ivxfswum.]*\s+} 30 } } */
diff --git a/auto-generated/gnu-api-tests/vzext_vf4.c b/auto-generated/gnu-api-tests/vzext_vf4.c
index 38f7c0787..b2d1329dd 100644
--- a/auto-generated/gnu-api-tests/vzext_vf4.c
+++ b/auto-generated/gnu-api-tests/vzext_vf4.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32mf2(op1, vl);
+vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32mf2(vs2, vl);
 }
-vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m1(op1, vl);
+vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m1(vs2, vl);
 }
-vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m2(op1, vl);
+vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m2(vs2, vl);
 }
-vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m4(op1, vl);
+vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m4(vs2, vl);
 }
-vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m8(op1, vl);
+vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m8(vs2, vl);
 }
-vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m1(op1, vl);
+vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m1(vs2, vl);
 }
-vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m2(op1, vl);
+vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m2(vs2, vl);
 }
-vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m4(op1, vl);
+vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m4(vs2, vl);
 }
-vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m8(op1, vl);
+vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m8(vs2, vl);
 }
-vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32mf2_m(mask, op1, vl);
+vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32mf2_m(vm, vs2, vl);
 }
-vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m1_m(mask, op1, vl);
+vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m1_m(vm, vs2, vl);
 }
-vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m2_m(mask, op1, vl);
+vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m2_m(vm, vs2, vl);
 }
-vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m4_m(mask, op1, vl);
+vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m4_m(vm, vs2, vl);
 }
-vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m8_m(mask, op1, vl);
+vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u32m8_m(vm, vs2, vl);
 }
-vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m1_m(mask, op1, vl);
+vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m1_m(vm, vs2, vl);
 }
-vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m2_m(mask, op1, vl);
+vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m2_m(vm, vs2, vl);
 }
-vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m4_m(mask, op1, vl);
+vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m4_m(vm, vs2, vl);
 }
-vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m8_m(mask, op1, vl);
+vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) {
  return __riscv_vzext_vf4_u64m8_m(vm, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf4[ivxfswum.]*\s+} 18 } } */
diff --git a/auto-generated/gnu-api-tests/vzext_vf8.c b/auto-generated/gnu-api-tests/vzext_vf8.c
index 6984d0258..3baaf03e9 100644
--- a/auto-generated/gnu-api-tests/vzext_vf8.c
+++ b/auto-generated/gnu-api-tests/vzext_vf8.c
@@ -6,36 +6,36 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m1(op1, vl);
+vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m1(vs2, vl);
 }
-vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m2(op1, vl);
+vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m2(vs2, vl);
 }
-vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m4(op1, vl);
+vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m4(vs2, vl);
 }
-vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m8(op1, vl);
+vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m8(vs2, vl);
 }
-vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m1_m(mask, op1, vl);
+vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m1_m(vm, vs2, vl);
 }
-vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m2_m(mask, op1, vl);
+vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m2_m(vm, vs2, vl);
 }
-vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m4_m(mask, op1, vl);
+vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m4_m(vm, vs2, vl);
 }
-vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m8_m(mask, op1, vl);
+vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) {
  return __riscv_vzext_vf8_u64m8_m(vm, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf8[ivxfswum.]*\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vaadd.c b/auto-generated/gnu-overloaded-tests/vaadd.c
index 83775c665..5e9d98a35 100644
--- a/auto-generated/gnu-overloaded-tests/vaadd.c
+++ b/auto-generated/gnu-overloaded-tests/vaadd.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vaadd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vaadd_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vaadd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vaadd_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vaadd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vaadd_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vaadd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vaadd_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vaadd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vaadd_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vaadd_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vaadd_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vaadd_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vaadd_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vaadd_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vaadd_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vaadd_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vaadd_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
  return __riscv_vaadd(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vaadd(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t vm,
vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t 
mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t 
test_vaadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vaadd(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vaadd(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaadd\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vaaddu.c b/auto-generated/gnu-overloaded-tests/vaaddu.c
index 25f0de67a..affeced68 100644
--- a/auto-generated/gnu-overloaded-tests/vaaddu.c
+++ b/auto-generated/gnu-overloaded-tests/vaaddu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t
test_vaaddu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t vs2, 
vuint16mf4_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, 
vl); +vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vaaddu(op1, 
op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vaaddu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t 
test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, 
size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vaaddu(mask, 
op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t 
test_vaaddu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaaddu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vadc.c b/auto-generated/gnu-overloaded-tests/vadc.c
index 63310a551..f99f33099 100644
--- a/auto-generated/gnu-overloaded-tests/vadc.c
+++ b/auto-generated/gnu-overloaded-tests/vadc.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc(vs2, vs1, v0, vl);
 }
-vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc(vs2, rs1, v0, vl);
 }
-vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc(vs2, vs1, v0, vl);
 }
-vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc(vs2, rs1, v0, vl);
 }
-vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc(vs2, vs1, v0, vl);
 }
-vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc(vs2, rs1, v0, vl);
 }
-vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8m1_t test_vadc_vvm_i8m1(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc(vs2, vs1, v0, vl);
 }
-vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vint8m1_t test_vadc_vxm_i8m1(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc(vs2, rs1, v0, vl);
 }
-vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin,
vl); +vint8m2_t test_vadc_vvm_i8m2(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint8m2_t test_vadc_vxm_i8m2(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint8m4_t test_vadc_vvm_i8m4(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint8m4_t test_vadc_vxm_i8m4(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint8m8_t test_vadc_vvm_i8m8(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint8m8_t test_vadc_vxm_i8m8(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m1_t test_vadc_vvm_i16m1(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m1_t test_vadc_vxm_i16m1(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m2_t test_vadc_vvm_i16m2(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m2_t 
test_vadc_vxm_i16m2(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m4_t test_vadc_vvm_i16m4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m4_t test_vadc_vxm_i16m4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m8_t test_vadc_vvm_i16m8(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint16m8_t test_vadc_vxm_i16m8(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m1_t test_vadc_vvm_i32m1(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m1_t test_vadc_vxm_i32m1(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m2_t test_vadc_vvm_i32m2(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m2_t test_vadc_vxm_i32m2(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m4_t test_vadc_vvm_i32m4(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m4_t test_vadc_vxm_i32m4(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m8_t 
test_vadc_vvm_i32m8(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint32m8_t test_vadc_vxm_i32m8(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m1_t test_vadc_vvm_i64m1(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m1_t test_vadc_vxm_i64m1(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m2_t test_vadc_vvm_i64m2(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m2_t test_vadc_vxm_i64m2(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m4_t test_vadc_vvm_i64m4(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m4_t test_vadc_vxm_i64m4(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m8_t test_vadc_vvm_i64m8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vint64m8_t test_vadc_vxm_i64m8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); 
+vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, 
carryin, vl); +vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint32m1_t 
test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc(vs2, rs1, v0, vl); } -vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc(vs2, vs1, v0, vl); } -vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc(op1, op2, carryin, vl); +vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t 
vl) {
+  return __riscv_vadc(vs2, rs1, v0, vl);
 }

-vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc(vs2, vs1, v0, vl);
 }

-vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc(op1, op2, carryin, vl);
+vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc(vs2, rs1, v0, vl);
 }

 /* { dg-final { scan-assembler-times {vadc\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vadd.c b/auto-generated/gnu-overloaded-tests/vadd.c
index 5a373f915..06b0135d7 100644
--- a/auto-generated/gnu-overloaded-tests/vadd.c
+++ b/auto-generated/gnu-overloaded-tests/vadd.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vadd(vs2, vs1, vl);
 }

-vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd(vs2, rs1, vl);
 }

-vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vadd(vs2, vs1, vl);
 }

-vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd(vs2, rs1, vl);
 }

-vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vadd(vs2, vs1, vl);
 }

-vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd(vs2, rs1, vl);
 }

-vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8m1_t test_vadd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vadd(vs2, vs1, vl);
 }

-vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8m1_t test_vadd_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd(vs2, rs1, vl);
 }

-vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8m2_t test_vadd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vadd(vs2, vs1, vl);
 }

-vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8m2_t test_vadd_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd(vs2, rs1, vl);
 }

-vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8m4_t test_vadd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vadd(vs2, vs1, vl);
 }

-vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd(op1, op2, vl);
+vint8m4_t
test_vadd_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t 
op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } 
-vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t 
vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vadd(vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vadd(vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd(op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd(vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint8mf8_t 
test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16mf4_t 
test_vadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t 
vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return 
__riscv_vadd(vm, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint8m4_t 
test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } 
-vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vm, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vadd(vm, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd(mask, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd(vm, 
vs2, rs1, vl);
 }

-vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd(mask, op1, op2, vl);
+vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vadd\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vand.c b/auto-generated/gnu-overloaded-tests/vand.c
index eefc6e0d9..82f37c67b 100644
--- a/auto-generated/gnu-overloaded-tests/vand.c
+++ b/auto-generated/gnu-overloaded-tests/vand.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vand(op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vand(vs2, vs1, vl);
 }

-vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand(op1, op2, vl);
+vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand(vs2, rs1, vl);
 }

-vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vand(op1, op2, vl);
+vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vand(vs2, vs1, vl);
 }

-vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand(op1, op2, vl);
+vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand(vs2, rs1, vl);
 }

-vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t
op1, vint8mf2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m1_t test_vand_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m1_t test_vand_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m2_t test_vand_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m2_t test_vand_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m4_t test_vand_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m4_t test_vand_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m8_t test_vand_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint8m8_t test_vand_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m1_t test_vand_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m1_t test_vand_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint16m2_t 
test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m2_t test_vand_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m2_t test_vand_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m4_t test_vand_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m4_t test_vand_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m8_t test_vand_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint16m8_t test_vand_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m1_t test_vand_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m1_t test_vand_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m2_t test_vand_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m2_t test_vand_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m4_t test_vand_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m4_t test_vand_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m8_t test_vand_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint32m8_t test_vand_vx_i32m8(vint32m8_t vs2, int32_t rs1, 
size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m1_t test_vand_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m1_t test_vand_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m2_t test_vand_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m2_t test_vand_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m4_t test_vand_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m4_t test_vand_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m8_t test_vand_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vint64m8_t test_vand_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vand(op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8m2_t test_vand_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint8m8_t test_vand_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + 
return __riscv_vand(vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m1_t test_vand_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint64m2_t 
test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vand(vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(op1, op2, vl); +vuint64m8_t test_vand_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vs2, rs1, vl); } -vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t 
op1, vint8m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m2_t 
test_vand_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, 
vint32m8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vand(vm, vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m2_t test_vand_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint8m8_t test_vand_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } 
-vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m1_t test_vand_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vand(vm, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t 
vl) { + return __riscv_vand(vm, vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vand(mask, op1, op2, vl); +vuint64m8_t test_vand_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vand\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vasub.c b/auto-generated/gnu-overloaded-tests/vasub.c index f94087ed4..eebb48531 100644 --- a/auto-generated/gnu-overloaded-tests/vasub.c +++ b/auto-generated/gnu-overloaded-tests/vasub.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return 
__riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t 
op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t 
test_vasub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vasub(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vasub(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, 
vl); } -vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t 
vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasub\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vasubu.c b/auto-generated/gnu-overloaded-tests/vasubu.c
index 2cd3974a7..70fff9e6c 100644
--- a/auto-generated/gnu-overloaded-tests/vasubu.c
+++ b/auto-generated/gnu-overloaded-tests/vasubu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return
__riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return 
__riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, 
uint32_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vasubu(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu(op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t 
op1, uint8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { 
- return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu(mask, op1, op2, 
__RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasubu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vcompress.c b/auto-generated/gnu-overloaded-tests/vcompress.c
index 31ca952ac..aa65b32f6 100644
--- a/auto-generated/gnu-overloaded-tests/vcompress.c
+++ b/auto-generated/gnu-overloaded-tests/vcompress.c
@@ -6,240 +6,240 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress(src, mask, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src,
vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint8mf8_t 
test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint8m1_t test_vcompress_vm_i8m1(vint8m1_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint8m2_t test_vcompress_vm_i8m2(vint8m2_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint8m4_t test_vcompress_vm_i8m4(vint8m4_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint8m8_t test_vcompress_vm_i8m8(vint8m8_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint16m1_t test_vcompress_vm_i16m1(vint16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint16m2_t test_vcompress_vm_i16m2(vint16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint16m4_t test_vcompress_vm_i16m4(vint16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint16m8_t test_vcompress_vm_i16m8(vint16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, 
mask, vl); +vint32m1_t test_vcompress_vm_i32m1(vint32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint32m2_t test_vcompress_vm_i32m2(vint32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint32m4_t test_vcompress_vm_i32m4(vint32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint32m8_t test_vcompress_vm_i32m8(vint32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint64m1_t test_vcompress_vm_i64m1(vint64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint64m2_t test_vcompress_vm_i64m2(vint64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint64m4_t test_vcompress_vm_i64m4(vint64m4_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vint64m8_t test_vcompress_vm_i64m8(vint64m8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t vs2, vbool1_t vs1, size_t vl) { + 
return __riscv_vcompress(vs2, vs1, vl); } -vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress(vs2, vs1, vl); } -vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress(src, mask, vl); +vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t vs2, vbool16_t vs1, size_t vl) { + return 
__riscv_vcompress(vs2, vs1, vl);
 }
 
-vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress(src, mask, vl);
+vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress(vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcompress\.[ivxfswum.]+\s+} 59 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vcpop.c b/auto-generated/gnu-overloaded-tests/vcpop.c
index d99768d65..19406d4e9 100644
--- a/auto-generated/gnu-overloaded-tests/vcpop.c
+++ b/auto-generated/gnu-overloaded-tests/vcpop.c
@@ -6,60 +6,60 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
-  return __riscv_vcpop(op1, vl);
+unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) {
+  return __riscv_vcpop(vs2, vl);
 }
 
-unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
-  return __riscv_vcpop(op1, vl);
+unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) {
+  return __riscv_vcpop(vs2, vl);
 }
 
-unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
-  return __riscv_vcpop(op1, vl);
+unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) {
+  return __riscv_vcpop(vs2, vl);
 }
 
-unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
-  return __riscv_vcpop(op1, vl);
+unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) {
+  return __riscv_vcpop(vs2, vl);
 }
 
-unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
-  return __riscv_vcpop(op1, vl);
+unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) {
+  return __riscv_vcpop(vs2, vl);
 }
 
-unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
-  return __riscv_vcpop(op1, vl);
+unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) {
+  return __riscv_vcpop(vs2, vl);
 }
 
-unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
-  return __riscv_vcpop(op1, vl);
+unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) {
+  return __riscv_vcpop(vs2, vl);
 }
 
-unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
-  return __riscv_vcpop(mask, op1, vl);
+unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) {
+  return __riscv_vcpop(vm, vs2, vl);
 }
 
-unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
-  return __riscv_vcpop(mask, op1, vl);
+unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) {
+  return __riscv_vcpop(vm, vs2, vl);
 }
 
-unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
-  return __riscv_vcpop(mask, op1, vl);
+unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) {
+  return __riscv_vcpop(vm, vs2, vl);
 }
 
-unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
-  return __riscv_vcpop(mask, op1, vl);
+unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) {
+  return __riscv_vcpop(vm, vs2, vl);
 }
 
-unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
-  return __riscv_vcpop(mask, op1, vl);
+unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) {
+  return __riscv_vcpop(vm, vs2, vl);
 }
 
-unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
-  return __riscv_vcpop(mask, op1, vl);
+unsigned long test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) {
+  return __riscv_vcpop(vm, vs2, vl);
 }
 
-unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
-  return __riscv_vcpop(mask, op1, vl);
+unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) {
+  return __riscv_vcpop(vm, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcpop\.[ivxfswum.]+\s+} 14 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vdiv.c b/auto-generated/gnu-overloaded-tests/vdiv.c
index 09e846a06..17595da46 100644
--- a/auto-generated/gnu-overloaded-tests/vdiv.c
+++ b/auto-generated/gnu-overloaded-tests/vdiv.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vdiv(vs2, vs1, vl);
 }
 
-vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv(vs2, rs1, vl);
 }
 
-vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vdiv(vs2, vs1, vl);
 }
 
-vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv(vs2, rs1, vl);
 }
 
-vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vdiv(vs2, vs1, vl);
 }
 
-vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv(vs2, rs1, vl);
 }
 
-vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8m1_t test_vdiv_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vdiv(vs2, vs1, vl);
 }
 
-vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8m1_t test_vdiv_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv(vs2, rs1, vl);
 }
 
-vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8m2_t test_vdiv_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vdiv(vs2, vs1, vl);
 }
 
-vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8m2_t test_vdiv_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv(vs2, rs1, vl);
 }
 
-vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8m4_t test_vdiv_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vdiv(vs2, vs1, vl);
 }
 
-vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8m4_t test_vdiv_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv(vs2, rs1, vl);
 }
 
-vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vdiv(op1, op2, vl);
+vint8m8_t test_vdiv_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vdiv(vs2, vs1, vl);
 }
 
-vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return
__riscv_vdiv(op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); 
} -vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv(vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t 
vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, 
vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, 
size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); 
+vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv(vm, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv(mask, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vdiv\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vdivu.c b/auto-generated/gnu-overloaded-tests/vdivu.c index 894af36b4..71dbcbf65 100644 --- a/auto-generated/gnu-overloaded-tests/vdivu.c +++ b/auto-generated/gnu-overloaded-tests/vdivu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m2_t 
test_vdivu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t vs2, 
vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, 
size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu(vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu(vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t 
vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint16m4_t 
test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t 
vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu(vm, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu(mask, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vdivu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfabs.c b/auto-generated/gnu-overloaded-tests/vfabs.c index 2ff120487..d73a303ad 100644 --- a/auto-generated/gnu-overloaded-tests/vfabs.c +++ b/auto-generated/gnu-overloaded-tests/vfabs.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat16m1_t test_vfabs_v_f16m1(vfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfabs(vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat16m2_t test_vfabs_v_f16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat16m4_t test_vfabs_v_f16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat16m8_t test_vfabs_v_f16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat32m1_t test_vfabs_v_f32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat32m2_t test_vfabs_v_f32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat32m4_t test_vfabs_v_f32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat32m8_t test_vfabs_v_f32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat64m1_t test_vfabs_v_f64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat64m2_t test_vfabs_v_f64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat64m4_t test_vfabs_v_f64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs(op1, vl); +vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs(vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + 
return __riscv_vfabs(vm, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs(mask, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfabs\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfadd.c b/auto-generated/gnu-overloaded-tests/vfadd.c index e166ac9ca..649eedabc 100644 --- a/auto-generated/gnu-overloaded-tests/vfadd.c +++ b/auto-generated/gnu-overloaded-tests/vfadd.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); 
+vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat32m2_t 
test_vfadd_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, 
vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfadd(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - 
return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return 
__riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, 
__RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, 
__RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return 
__riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t 
vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfadd\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfclass.c b/auto-generated/gnu-overloaded-tests/vfclass.c index 5d6dacd47..78bfbafc7 100644 --- a/auto-generated/gnu-overloaded-tests/vfclass.c +++ b/auto-generated/gnu-overloaded-tests/vfclass.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint16m1_t test_vfclass_v_u16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint16m2_t test_vfclass_v_u16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint16m4_t test_vfclass_v_u16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint16m8_t test_vfclass_v_u16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); 
+vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass(op1, vl); +vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, 
vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass(mask, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfclass\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfcvt.c b/auto-generated/gnu-overloaded-tests/vfcvt.c index dd78f515e..34d040d1b 100644 --- a/auto-generated/gnu-overloaded-tests/vfcvt.c +++ b/auto-generated/gnu-overloaded-tests/vfcvt.c @@ -6,964 +6,964 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4(vfloat16mf4_t vs2, 
size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4(vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2(vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1(vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2(vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4(vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8(vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8(vuint16m8_t vs2, 
size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t vs2, size_t vl) { + return 
__riscv_vfcvt_f(vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x(src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu(src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat64m4_t 
test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); 
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t vm, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t vm, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint32m2_t 
test_vfcvt_x_f_v_i32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t vm, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t 
vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t vm, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t vm, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t vm, vint64m4_t vs2, size_t vl) { + return 
__riscv_vfcvt_f(vm, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t vm, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_rm(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t 
test_vfcvt_xu_f_v_u16m2_rm(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm(vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm(vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm(vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm(vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm(vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm(vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm(vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm(vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm(vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm(vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm(vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm(vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm(vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm(vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm(vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm(vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm(vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm(vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm(vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm(vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm(vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm(vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm(vuint16m8_t src, size_t vl) { - return 
__riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm(vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm(vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm(vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm(vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm(vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm(vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm(vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm(vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m4_t 
test_vfcvt_f_x_v_f32m4_rm(vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm(vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm(vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm(vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm(vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm(vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm(vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm(vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm(vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm(vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm(vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm(vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm(vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_rm(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_rm(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_rm(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_rm(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vs2, 
__RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm(vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm(vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm(vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm(vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm(vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm(vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm(vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm(vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm(vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm(vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm(vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm(vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm(vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm(vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm(vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm(vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return 
__riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_m(vbool4_t vm, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_m(vbool2_t mask, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); 
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_m(vbool2_t vm, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_m(vbool2_t mask, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_m(vbool64_t vm, 
vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_m(vbool4_t vm, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return 
__riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_m(vbool64_t vm, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_m(vbool32_t vm, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfcvt_f_x_v_f64m4_rm_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_m(vbool16_t vm, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_m(vbool8_t vm, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f(mask, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f(vm, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfcvt_rtz.c b/auto-generated/gnu-overloaded-tests/vfcvt_rtz.c index 704ac4f0a..1e10e16ef 100644 --- a/auto-generated/gnu-overloaded-tests/vfcvt_rtz.c +++ b/auto-generated/gnu-overloaded-tests/vfcvt_rtz.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, 
vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint32m8_t 
test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vs2, vl); } -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, 
size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); 
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x(vm, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.rtz[ivxfswum.]*\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfdiv.c b/auto-generated/gnu-overloaded-tests/vfdiv.c index 77c2c424c..eb8c152e4 100644 --- a/auto-generated/gnu-overloaded-tests/vfdiv.c +++ b/auto-generated/gnu-overloaded-tests/vfdiv.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } 
-vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m1_t 
test_vfdiv_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, 
vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32mf2_t 
test_vfdiv_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } 
-vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv(mask, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); 
+vfloat16m2_t test_vfdiv_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfdiv_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfdiv_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfdiv_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfdiv_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfdiv_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfdiv_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfdiv_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfdiv_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfdiv_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv(vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfdiv_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfdiv_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfdiv_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfdiv_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfdiv_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfdiv_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfdiv_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfdiv_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfdiv_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfdiv_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfdiv_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfdiv_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfdiv_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfdiv_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfdiv_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfdiv_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfdiv_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfdiv_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfdiv_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfdiv_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfdiv_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfdiv_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfdiv_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfdiv_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfdiv_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfdiv\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfirst.c b/auto-generated/gnu-overloaded-tests/vfirst.c
index 1a5f070e1..fc913211b 100644
--- a/auto-generated/gnu-overloaded-tests/vfirst.c
+++ b/auto-generated/gnu-overloaded-tests/vfirst.c
@@ -6,60 +6,60 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-long test_vfirst_m_b1(vbool1_t op1, size_t vl) {
-  return __riscv_vfirst(op1, vl);
+long test_vfirst_m_b1(vbool1_t vs2, size_t vl) {
+  return __riscv_vfirst(vs2, vl);
 }

-long test_vfirst_m_b2(vbool2_t op1, size_t vl) {
-  return __riscv_vfirst(op1, vl);
+long test_vfirst_m_b2(vbool2_t vs2, size_t vl) {
+  return __riscv_vfirst(vs2, vl);
 }

-long test_vfirst_m_b4(vbool4_t op1, size_t vl) {
-  return __riscv_vfirst(op1, vl);
+long test_vfirst_m_b4(vbool4_t vs2, size_t vl) {
+  return __riscv_vfirst(vs2, vl);
 }

-long test_vfirst_m_b8(vbool8_t op1, size_t vl) {
-  return __riscv_vfirst(op1, vl);
+long test_vfirst_m_b8(vbool8_t vs2, size_t vl) {
+  return __riscv_vfirst(vs2, vl);
 }

-long test_vfirst_m_b16(vbool16_t op1, size_t vl) {
-  return __riscv_vfirst(op1, vl);
+long test_vfirst_m_b16(vbool16_t vs2, size_t vl) {
+  return __riscv_vfirst(vs2, vl);
 }

-long test_vfirst_m_b32(vbool32_t op1, size_t vl) {
-  return __riscv_vfirst(op1, vl);
+long test_vfirst_m_b32(vbool32_t vs2, size_t vl) {
+  return __riscv_vfirst(vs2, vl);
 }

-long test_vfirst_m_b64(vbool64_t op1, size_t vl) {
-  return __riscv_vfirst(op1, vl);
+long test_vfirst_m_b64(vbool64_t vs2, size_t vl) {
+  return __riscv_vfirst(vs2, vl);
 }

-long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
-  return __riscv_vfirst(mask, op1, vl);
+long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) {
+  return __riscv_vfirst(vm, vs2, vl);
 }

-long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
-  return __riscv_vfirst(mask, op1, vl);
+long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) {
+  return __riscv_vfirst(vm, vs2, vl);
 }

-long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
-  return __riscv_vfirst(mask, op1, vl);
+long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) {
+  return __riscv_vfirst(vm, vs2, vl);
 }

-long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
-  return __riscv_vfirst(mask, op1, vl);
+long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) {
+  return __riscv_vfirst(vm, vs2, vl);
 }

-long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
-  return __riscv_vfirst(mask, op1, vl);
+long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) {
+  return __riscv_vfirst(vm, vs2, vl);
 }

-long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
-  return __riscv_vfirst(mask, op1, vl);
+long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) {
+  return __riscv_vfirst(vm, vs2, vl);
 }

-long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
-  return __riscv_vfirst(mask, op1, vl);
+long test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) {
+  return __riscv_vfirst(vm, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfirst\.[ivxfswum.]+\s+} 14 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfmacc.c b/auto-generated/gnu-overloaded-tests/vfmacc.c
index a5200f32a..d7597fe1a 100644
--- a/auto-generated/gnu-overloaded-tests/vfmacc.c
+++ b/auto-generated/gnu-overloaded-tests/vfmacc.c
@@ -126,124 +126,124 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v
   return __riscv_vfmacc(vd, rs1, vs2, vl);
 }

-vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

-vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, vl);
 }

-vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, vl);
 }

 vfloat16mf4_t test_vfmacc_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -366,124 +366,124 @@ vfloat64m8_t test_vfmacc_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8_
   return __riscv_vfmacc(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfmacc_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfmacc_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfmacc_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfmacc_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfmacc_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfmacc_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfmacc_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfmacc_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmacc\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfmadd.c b/auto-generated/gnu-overloaded-tests/vfmadd.c
index 8fe711b0d..866c03442 100644
--- a/auto-generated/gnu-overloaded-tests/vfmadd.c
+++ b/auto-generated/gnu-overloaded-tests/vfmadd.c
@@ -126,124 +126,124 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v
   return __riscv_vfmadd(vd, rs1, vs2, vl);
 }

-vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmadd_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmadd_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmadd_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmadd_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmadd_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmadd_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmadd_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmadd_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmadd_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmadd_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmadd_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmadd_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

-vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, vl);
 }

-vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, vl);
 }

 vfloat16mf4_t test_vfmadd_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -366,124 +366,124 @@ vfloat64m8_t test_vfmadd_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8_
   return __riscv_vfmadd(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfmadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmadd_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfmadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmadd_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfmadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmadd_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfmadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmadd_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfmadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmadd_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfmadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmadd_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmadd_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfmadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmadd_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfmadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmadd_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfmadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmadd_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfmadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmadd_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfmadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmadd_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfmadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmadd_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfmadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmadd_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmadd_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfmadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmadd_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfmadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmadd_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfmadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmadd_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfmadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmadd_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfmadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmadd_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfmadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmadd_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfmadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmadd_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmadd_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfmadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmadd_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmadd\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfmax.c b/auto-generated/gnu-overloaded-tests/vfmax.c
index c0f877b3a..446a67686 100644
--- a/auto-generated/gnu-overloaded-tests/vfmax.c
+++ b/auto-generated/gnu-overloaded-tests/vfmax.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16mf4_t test_vfmax_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16mf4_t test_vfmax_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16mf2_t test_vfmax_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16mf2_t test_vfmax_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m1_t test_vfmax_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m1_t test_vfmax_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m2_t test_vfmax_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m2_t test_vfmax_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m4_t test_vfmax_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m4_t test_vfmax_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m8_t test_vfmax_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat16m8_t test_vfmax_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmax(vs2, vs1, vl);
 }

-vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmax(op1, op2, vl);
+vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmax(vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16mf4_t test_vfmax_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16mf4_t test_vfmax_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16mf2_t test_vfmax_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16mf2_t test_vfmax_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmax(vm, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfmax(mask, op1, op2, vl);
+vfloat32mf2_t
test_vfmax_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } 
-vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax(mask, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmax\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfmerge.c b/auto-generated/gnu-overloaded-tests/vfmerge.c index 5fdc9f7af..45f0b365d 100644 --- a/auto-generated/gnu-overloaded-tests/vfmerge.c +++ b/auto-generated/gnu-overloaded-tests/vfmerge.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, float16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t vs2, float16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, float16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t vs2, float16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, float16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t vs2, float16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, float16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t vs2, float16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, float16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t vs2, float16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, float16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t 
vs2, float16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float32_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t vs2, float32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float32_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t vs2, float32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float32_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t vs2, float32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float32_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t vs2, float32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float32_t op2, vbool4_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t vs2, float32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, float64_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t vs2, float64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, float64_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t vs2, float64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, float64_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t vs2, float64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } -vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, float64_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge(op1, op2, mask, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t vs2, float64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge(vs2, rs1, v0, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmerge\.[ivxfswum.]+\s+} 15 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfmin.c b/auto-generated/gnu-overloaded-tests/vfmin.c index 1c3f61d47..26a022577 100644 --- a/auto-generated/gnu-overloaded-tests/vfmin.c +++ b/auto-generated/gnu-overloaded-tests/vfmin.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); 
+vfloat16mf4_t test_vfmin_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat32m2_t 
test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin(vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin(op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16mf4_t 
test_vfmin_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin(vm, 
vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return 
__riscv_vfmin(mask, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin(mask, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmin\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfmsac.c b/auto-generated/gnu-overloaded-tests/vfmsac.c index 2e2e76f30..e2ecfbabc 100644 --- a/auto-generated/gnu-overloaded-tests/vfmsac.c +++ b/auto-generated/gnu-overloaded-tests/vfmsac.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v return __riscv_vfmsac(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } 
-vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, 
float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, 
vs2, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmsac_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfmsac_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmsac(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t 
test_vfmsac_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t 
test_vfmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_rm_m(vbool32_t 
vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsac\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfmsub.c b/auto-generated/gnu-overloaded-tests/vfmsub.c index deead240b..c4d7ab7fc 100644 --- a/auto-generated/gnu-overloaded-tests/vfmsub.c +++ b/auto-generated/gnu-overloaded-tests/vfmsub.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t v return __riscv_vfmsub(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } 
-vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t 
vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, 
vs1, vs2, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmsub_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfmsub_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmsub(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) 
{ + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return 
__riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsub\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfmul.c b/auto-generated/gnu-overloaded-tests/vfmul.c index fc7d501a8..aec792af2 100644 --- a/auto-generated/gnu-overloaded-tests/vfmul.c +++ b/auto-generated/gnu-overloaded-tests/vfmul.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t 
vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return 
__riscv_vfmul(op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return 
__riscv_vfmul(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t 
op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t 
rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t 
vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return 
__riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, 
__RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t 
vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmul\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfmv.c b/auto-generated/gnu-overloaded-tests/vfmv.c index dd8d6eb68..3db1bb8f8 100644 --- a/auto-generated/gnu-overloaded-tests/vfmv.c +++ b/auto-generated/gnu-overloaded-tests/vfmv.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -float16_t test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) { - return __riscv_vfmv_f(src); +float16_t test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t vs1) { + return __riscv_vfmv_f(vs1); } -float16_t test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) { - return __riscv_vfmv_f(src); +float16_t test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t vs1) { + return __riscv_vfmv_f(vs1); } -float16_t test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) { - return __riscv_vfmv_f(src); +float16_t test_vfmv_f_s_f16m1_f16(vfloat16m1_t vs1) { + return __riscv_vfmv_f(vs1); } -float16_t test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) { - return __riscv_vfmv_f(src); +float16_t test_vfmv_f_s_f16m2_f16(vfloat16m2_t vs1) { + return __riscv_vfmv_f(vs1); } -float16_t test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) { - return __riscv_vfmv_f(src); +float16_t test_vfmv_f_s_f16m4_f16(vfloat16m4_t vs1) { + return __riscv_vfmv_f(vs1); } -float16_t test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) { - return __riscv_vfmv_f(src); +float16_t test_vfmv_f_s_f16m8_f16(vfloat16m8_t vs1) { + return __riscv_vfmv_f(vs1); } -float32_t test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { - return __riscv_vfmv_f(src); +float32_t test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t vs1) { + return __riscv_vfmv_f(vs1); } -float32_t test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { - return __riscv_vfmv_f(src); +float32_t test_vfmv_f_s_f32m1_f32(vfloat32m1_t vs1) { + return __riscv_vfmv_f(vs1); } 
-float32_t test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { - return __riscv_vfmv_f(src); +float32_t test_vfmv_f_s_f32m2_f32(vfloat32m2_t vs1) { + return __riscv_vfmv_f(vs1); } -float32_t test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { - return __riscv_vfmv_f(src); +float32_t test_vfmv_f_s_f32m4_f32(vfloat32m4_t vs1) { + return __riscv_vfmv_f(vs1); } -float32_t test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { - return __riscv_vfmv_f(src); +float32_t test_vfmv_f_s_f32m8_f32(vfloat32m8_t vs1) { + return __riscv_vfmv_f(vs1); } -float64_t test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { - return __riscv_vfmv_f(src); +float64_t test_vfmv_f_s_f64m1_f64(vfloat64m1_t vs1) { + return __riscv_vfmv_f(vs1); } -float64_t test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { - return __riscv_vfmv_f(src); +float64_t test_vfmv_f_s_f64m2_f64(vfloat64m2_t vs1) { + return __riscv_vfmv_f(vs1); } -float64_t test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { - return __riscv_vfmv_f(src); +float64_t test_vfmv_f_s_f64m4_f64(vfloat64m4_t vs1) { + return __riscv_vfmv_f(vs1); } -float64_t test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { - return __riscv_vfmv_f(src); +float64_t test_vfmv_f_s_f64m8_f64(vfloat64m8_t vs1) { + return __riscv_vfmv_f(vs1); } /* { dg-final { scan-assembler-times {vfmv\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 15 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfncvt.c b/auto-generated/gnu-overloaded-tests/vfncvt.c index 6bc2f92b5..5fe191f47 100644 --- a/auto-generated/gnu-overloaded-tests/vfncvt.c +++ b/auto-generated/gnu-overloaded-tests/vfncvt.c @@ -6,916 +6,916 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint8m1_t 
test_vfncvt_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x(src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu(src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f(src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f(vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f(src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f(vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f(src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f(vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f(src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t vs2, size_t vl) { + 
 }

-vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, vl);
 }

-vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, vl);
 }

-vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, vl);
 }

-vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, vl);
 }

-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, vl);
 }

-vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, vl);
 }

-vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, vl);
 }

-vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, vl);
 }

-vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, vl);
 }

-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, vl);
 }

-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint8m1_t test_vfncvt_x_f_w_i8m1_rm(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint8m2_t test_vfncvt_x_f_w_i8m2_rm(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint8m4_t test_vfncvt_x_f_w_i8m4_rm(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint16m1_t test_vfncvt_x_f_w_i16m1_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint16m2_t test_vfncvt_x_f_w_i16m2_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint16m4_t test_vfncvt_x_f_w_i16m4_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm(vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm(vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm(vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm(vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm(vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm(vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm(vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm(vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm(vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm(vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm(vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm(vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm(vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm(vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint32m1_t test_vfncvt_x_f_w_i32m1_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint32m2_t test_vfncvt_x_f_w_i32m2_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vint32m4_t test_vfncvt_x_f_w_i32m4_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm(vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm(vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm(vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm(vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm(vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm(vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm(vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm(vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm(vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm(vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm(vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm(vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vs2, __RISCV_FRM_RNE, vl);
 }

-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint8m1_t test_vfncvt_x_f_w_i8m1_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint8m2_t test_vfncvt_x_f_w_i8m2_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint8m4_t test_vfncvt_x_f_w_i8m4_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint16m1_t test_vfncvt_x_f_w_i16m1_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint16m2_t test_vfncvt_x_f_w_i16m2_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint16m4_t test_vfncvt_x_f_w_i16m4_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_m(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_m(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_m(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_m(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint32m1_t test_vfncvt_x_f_w_i32m1_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint32m2_t test_vfncvt_x_f_w_i32m2_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vint32m4_t test_vfncvt_x_f_w_i32m4_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x(mask, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu(mask, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_m(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_m(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_m(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_m(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f(mask, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f(vm, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.[ivxfswum.]+\s+} 228 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfncvt_rod.c b/auto-generated/gnu-overloaded-tests/vfncvt_rod.c
index c0dd9d400..1e77bac9a 100644
--- a/auto-generated/gnu-overloaded-tests/vfncvt_rod.c
+++ b/auto-generated/gnu-overloaded-tests/vfncvt_rod.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(src, vl);
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vs2, vl);
 }

-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f(mask, src, vl);
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f(vm, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rod[ivxfswum.]*\s+} 18 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfncvt_rtz.c b/auto-generated/gnu-overloaded-tests/vfncvt_rtz.c
index 38324cad3..8662fce35 100644
--- a/auto-generated/gnu-overloaded-tests/vfncvt_rtz.c
+++ b/auto-generated/gnu-overloaded-tests/vfncvt_rtz.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x(src, vl);
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x(vs2, vl);
 }

-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x(src, vl);
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x(vs2, vl);
 }

-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x(src, vl);
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x(vs2, vl);
 }

-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x(src, vl);
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x(vs2, vl);
 }

-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x(src, vl);
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x(vs2, vl);
 }

-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x(src, vl);
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x(vs2, vl);
 }

-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu(src, vl);
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu(vs2, vl);
 }

-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu(src, vl);
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu(vs2, vl);
 }

-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu(src, vl);
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu(vs2, vl);
 }

-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu(src, vl);
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu(vs2, vl);
 }

-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu(src, vl);
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t vs2, size_t vl) {
test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); } -vuint32mf2_t 
test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + 
return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return 
__riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rtz[ivxfswum.]*\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfneg.c b/auto-generated/gnu-overloaded-tests/vfneg.c index 472c3bf6a..5e0577520 100644 --- a/auto-generated/gnu-overloaded-tests/vfneg.c +++ b/auto-generated/gnu-overloaded-tests/vfneg.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4(vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2(vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat16m1_t test_vfneg_v_f16m1(vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat16m2_t test_vfneg_v_f16m2(vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat16m4_t test_vfneg_v_f16m4(vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat16m8_t test_vfneg_v_f16m8(vfloat16m8_t 
vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2(vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat32m1_t test_vfneg_v_f32m1(vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat32m2_t test_vfneg_v_f32m2(vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat32m4_t test_vfneg_v_f32m4(vfloat32m4_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat32m8_t test_vfneg_v_f32m8(vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat64m1_t test_vfneg_v_f64m1(vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat64m2_t test_vfneg_v_f64m2(vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat64m4_t test_vfneg_v_f64m4(vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg(op1, vl); +vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg(vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, 
size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg(mask, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg(vm, vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfneg\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfnmacc.c b/auto-generated/gnu-overloaded-tests/vfnmacc.c index 9e7b4b7dd..b1a1001ec 100644 --- a/auto-generated/gnu-overloaded-tests/vfnmacc.c +++ b/auto-generated/gnu-overloaded-tests/vfnmacc.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t return __riscv_vfnmacc(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return 
__riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, 
vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat64m2_t 
test_vfnmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmacc(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, 
size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, 
vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t 
vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmacc\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfnmadd.c b/auto-generated/gnu-overloaded-tests/vfnmadd.c index 06ebb6f8c..a3bfbd501 100644 --- a/auto-generated/gnu-overloaded-tests/vfnmadd.c +++ b/auto-generated/gnu-overloaded-tests/vfnmadd.c @@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t return __riscv_vfnmadd(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, 
size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return 
__riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, 
vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmadd(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return 
+vfloat32m8_t test_vfnmadd_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmadd_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmadd_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmadd_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmadd\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfnmsac.c b/auto-generated/gnu-overloaded-tests/vfnmsac.c
index 5324994c5..7840b4ec3 100644
--- a/auto-generated/gnu-overloaded-tests/vfnmsac.c
+++ b/auto-generated/gnu-overloaded-tests/vfnmsac.c
@@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8_t
   return __riscv_vfnmsac(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmsac_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmsac_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmsac_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmsac_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmsac_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmsac_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmsac_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmsac_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmsac_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmsac_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmsac_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmsac_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, vl);
 }
 
 vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8
   return __riscv_vfnmsac(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsac_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsac_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsac_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsac_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsac_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsac_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsac_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsac_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsac_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsac_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsac_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsac_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsac_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsac_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsac_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsac_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsac_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsac\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfnmsub.c b/auto-generated/gnu-overloaded-tests/vfnmsub.c
index 915d9b4f1..af63a77e6 100644
--- a/auto-generated/gnu-overloaded-tests/vfnmsub.c
+++ b/auto-generated/gnu-overloaded-tests/vfnmsub.c
@@ -126,124 +126,124 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t vd, float64_t rs1, vfloat64m8
   return __riscv_vfnmsub(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmsub_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmsub_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmsub_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmsub_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, vl);
 }
 
 vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -366,124 +366,124 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_rm(vfloat64m8_t vd, float64_t rs1, vfloat64m8
   return __riscv_vfnmsub(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsub\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfrdiv.c b/auto-generated/gnu-overloaded-tests/vfrdiv.c
index be43e3950..6ec637395 100644
--- a/auto-generated/gnu-overloaded-tests/vfrdiv.c
+++ b/auto-generated/gnu-overloaded-tests/vfrdiv.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfrdiv_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfrdiv_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfrdiv_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfrdiv_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfrdiv_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfrdiv_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfrdiv_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
vl) { + return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { 
+  return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfrdiv_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfrdiv_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfrdiv_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfrdiv_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfrdiv_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv(mask, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrdiv\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfrec7.c b/auto-generated/gnu-overloaded-tests/vfrec7.c
index 902ac0885..3a0f01759 100644
--- a/auto-generated/gnu-overloaded-tests/vfrec7.c
+++ b/auto-generated/gnu-overloaded-tests/vfrec7.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat16m1_t test_vfrec7_v_f16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat16m2_t test_vfrec7_v_f16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat16m4_t test_vfrec7_v_f16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat16m8_t test_vfrec7_v_f16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, vl);
+vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, vl);
 }
-vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, vl);
 }
-vfloat16mf4_t test_vfrec7_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfrec7_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfrec7_v_f16m1_rm(vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfrec7_v_f16m2_rm(vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfrec7_v_f16m4_rm(vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfrec7_v_f16m8_rm(vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfrec7_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfrec7_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfrec7_v_f32m2_rm(vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfrec7_v_f32m4_rm(vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfrec7_v_f32m8_rm(vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfrec7_v_f64m1_rm(vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfrec7_v_f64m2_rm(vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfrec7_v_f64m4_rm(vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfrec7_v_f64m8_rm(vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7(op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfrec7_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfrec7_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfrec7_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfrec7_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfrec7_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfrec7_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfrec7_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfrec7_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfrec7_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfrec7_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfrec7_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfrec7_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrec7\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfredmax.c b/auto-generated/gnu-overloaded-tests/vfredmax.c
index 2cc675330..5cd291856 100644
--- a/auto-generated/gnu-overloaded-tests/vfredmax.c
+++ b/auto-generated/gnu-overloaded-tests/vfredmax.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax(vm, vs2, vs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmax\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfredmin.c b/auto-generated/gnu-overloaded-tests/vfredmin.c
index 10c7fa1d9..fbfd073f8 100644
--- a/auto-generated/gnu-overloaded-tests/vfredmin.c
+++ b/auto-generated/gnu-overloaded-tests/vfredmin.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin(vm, vs2, vs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmin\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfredosum.c b/auto-generated/gnu-overloaded-tests/vfredosum.c
index f2278fc7c..13e1e1dc1 100644
--- a/auto-generated/gnu-overloaded-tests/vfredosum.c
+++ b/auto-generated/gnu-overloaded-tests/vfredosum.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredosum\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfredusum.c b/auto-generated/gnu-overloaded-tests/vfredusum.c
index e9602d5c3..9e8c0c5d5 100644
--- a/auto-generated/gnu-overloaded-tests/vfredusum.c
+++ b/auto-generated/gnu-overloaded-tests/vfredusum.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum(vm, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return
__riscv_vfredusum(mask, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm(vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm(vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm(vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm(vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm(vfloat16m8_t vector, vfloat16m1_t scalar, 
size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm(vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm(vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm(vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm(vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm(vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm(vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm(vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm(vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t 
test_vfredusum_vs_f16mf2_f16m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_m(vbool4_t vm, 
vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredusum\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfrsqrt7.c b/auto-generated/gnu-overloaded-tests/vfrsqrt7.c index 33d1772d5..29c763522 100644 --- a/auto-generated/gnu-overloaded-tests/vfrsqrt7.c +++ b/auto-generated/gnu-overloaded-tests/vfrsqrt7.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8(vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, 
vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7(op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return 
__riscv_vfrsqrt7(mask, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7(mask, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsqrt7\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfrsub.c b/auto-generated/gnu-overloaded-tests/vfrsub.c index 001104ae7..dd6334503 100644 --- a/auto-generated/gnu-overloaded-tests/vfrsub.c +++ b/auto-generated/gnu-overloaded-tests/vfrsub.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat16m4_t 
test_vfrsub_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); 
+vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, 
vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t 
test_vfrsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsub\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfsgnj.c b/auto-generated/gnu-overloaded-tests/vfsgnj.c index fb69d53a0..665cf2d50 100644 --- a/auto-generated/gnu-overloaded-tests/vfsgnj.c +++ b/auto-generated/gnu-overloaded-tests/vfsgnj.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat16m1_t 
test_vfsgnj_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); 
+vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj(vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj(op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj(vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnj(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnj(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj(mask, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnj(mask, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnj(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, 
size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj(mask, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnj\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfsgnjn.c b/auto-generated/gnu-overloaded-tests/vfsgnjn.c
index 812e9660e..4507ec545 100644
--- a/auto-generated/gnu-overloaded-tests/vfsgnjn.c
+++ b/auto-generated/gnu-overloaded-tests/vfsgnjn.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(op1, op2, vl);
+vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjn(mask, op1, op2, vl);
+vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjn(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjn\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfsgnjx.c b/auto-generated/gnu-overloaded-tests/vfsgnjx.c
index c9746893c..6954f4671 100644
--- a/auto-generated/gnu-overloaded-tests/vfsgnjx.c
+++ b/auto-generated/gnu-overloaded-tests/vfsgnjx.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16mf4_t test_vfsgnjx_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16mf4_t test_vfsgnjx_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16mf2_t test_vfsgnjx_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16mf2_t test_vfsgnjx_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m1_t test_vfsgnjx_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m1_t test_vfsgnjx_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m2_t test_vfsgnjx_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m2_t test_vfsgnjx_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m4_t test_vfsgnjx_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m4_t test_vfsgnjx_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m8_t test_vfsgnjx_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat16m8_t test_vfsgnjx_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(op1, op2, vl);
+vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnjx(mask, op1, op2, vl);
+vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnjx(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjx\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfslide1down.c b/auto-generated/gnu-overloaded-tests/vfslide1down.c
index b807acfe0..38c526ffc 100644
--- a/auto-generated/gnu-overloaded-tests/vfslide1down.c
+++ b/auto-generated/gnu-overloaded-tests/vfslide1down.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat16mf4_t test_vfslide1down_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat16mf2_t test_vfslide1down_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat16m1_t test_vfslide1down_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat16m2_t test_vfslide1down_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat16m4_t test_vfslide1down_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat16m8_t test_vfslide1down_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(src, value, vl);
+vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1down(mask, src, value, vl);
+vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1down(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1down\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfslide1up.c b/auto-generated/gnu-overloaded-tests/vfslide1up.c
index 6c74aaf15..abd899cb5 100644
--- a/auto-generated/gnu-overloaded-tests/vfslide1up.c
+++ b/auto-generated/gnu-overloaded-tests/vfslide1up.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat16mf4_t test_vfslide1up_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat16mf2_t test_vfslide1up_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat16m1_t test_vfslide1up_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat16m2_t test_vfslide1up_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat16m4_t test_vfslide1up_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat16m8_t test_vfslide1up_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(src, value, vl);
+vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat16mf4_t test_vfslide1up_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat16mf2_t test_vfslide1up_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t mask, vfloat16m1_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat16m1_t test_vfslide1up_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t mask, vfloat16m2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat16m2_t test_vfslide1up_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t mask, vfloat16m4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat16m4_t test_vfslide1up_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t mask, vfloat16m8_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat16m8_t test_vfslide1up_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up(mask, src, value, vl);
+vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1up\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfsqrt.c b/auto-generated/gnu-overloaded-tests/vfsqrt.c
index 5486f8fb9..047c3cb6c 100644
--- a/auto-generated/gnu-overloaded-tests/vfsqrt.c
+++ b/auto-generated/gnu-overloaded-tests/vfsqrt.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, vl);
 }
 
-vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, vl);
 }
 
-vfloat16mf4_t test_vfsqrt_v_f16mf4_rm(vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm(vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsqrt_v_f16mf2_rm(vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm(vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsqrt_v_f16m1_rm(vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1_rm(vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsqrt_v_f16m2_rm(vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2_rm(vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsqrt_v_f16m4_rm(vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4_rm(vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsqrt_v_f16m8_rm(vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8_rm(vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfsqrt_v_f32mf2_rm(vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfsqrt_v_f32m1_rm(vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfsqrt_v_f32m2_rm(vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfsqrt_v_f32m4_rm(vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfsqrt_v_f32m8_rm(vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfsqrt_v_f64m1_rm(vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_rm(vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfsqrt_v_f64m2_rm(vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_rm(vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfsqrt_v_f64m4_rm(vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_rm(vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfsqrt_v_f64m8_rm(vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_rm(vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsqrt_v_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsqrt_v_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsqrt_v_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsqrt_v_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfsqrt_v_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfsqrt_v_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfsqrt_v_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfsqrt_v_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfsqrt_v_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfsqrt_v_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, size_t vl) {
  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfsqrt_v_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfsqrt_v_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt(mask, op1, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt(vm, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsqrt\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfsub.c b/auto-generated/gnu-overloaded-tests/vfsub.c index 641b65aff..ca47b898c 100644 --- a/auto-generated/gnu-overloaded-tests/vfsub.c +++ b/auto-generated/gnu-overloaded-tests/vfsub.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfsub(op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat64m4_t 
test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat16m4_t 
test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); 
+vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vv_f16m2_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); 
+vfloat32m2_t test_vfsub_vv_f32m2_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vv_f64m8_rm(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t 
test_vfsub_vf_f64m8_rm(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vv_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t 
test_vfsub_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, 
__RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsub\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfwadd.c b/auto-generated/gnu-overloaded-tests/vfwadd.c index 6b4627ac5..c39b5e935 100644 --- a/auto-generated/gnu-overloaded-tests/vfwadd.c +++ b/auto-generated/gnu-overloaded-tests/vfwadd.c @@ -6,580 +6,580 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, 
op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2(vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat32m1_t test_vfwadd_vf_f32m1(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1(vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2(vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, 
vl); +vfloat32m4_t test_vfwadd_wv_f32m4(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4(vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8(vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat64m4_t 
test_vfwadd_vv_f64m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat32m1_t 
test_vfwadd_vf_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat32m8_t 
test_vfwadd_vf_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat64m4_t 
test_vfwadd_vf_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm(vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm(vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t 
test_vfwadd_vf_f32m1_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_rm(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm(vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_rm(vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_rm(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm(vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_rm(vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_rm(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm(vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_rm(vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_rm(vfloat16m4_t vs2, float16_t rs1, 
size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_rm(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm(vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_rm(vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_rm(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm(vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_rm(vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_rm(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm(vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_rm(vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_rm(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm(vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_rm(vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_rm(vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm(vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_rm(vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, 
float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, 
__RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwadd\.[ivxfswum.]+\s+} 144 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfwcvt.c b/auto-generated/gnu-overloaded-tests/vfwcvt.c index cfe21f06a..03c4943b3 100644 --- a/auto-generated/gnu-overloaded-tests/vfwcvt.c +++ 
b/auto-generated/gnu-overloaded-tests/vfwcvt.c @@ -6,604 +6,604 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4(vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2(vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1(vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2(vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4(vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8(vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4(vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1(vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2(vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4(vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8(vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vint32m8_t 
test_vfwcvt_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2(vfloat16mf4_t 
vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x(src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m1_t 
test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f(src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t vm, vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t vm, vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t vm, vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m(vbool32_t vm, vuint8mf4_t vs2, 
size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t vm, 
vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t vm, vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m4_t 
test_vfwcvt_f_f_v_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); 
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); 
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x(src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_m(vbool16_t mask, 
vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x(mask, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_m(vbool64_t mask, vfloat32mf2_t src, 
size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu(mask, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.[ivxfswum.]+\s+} 150 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfwcvt_rtz.c b/auto-generated/gnu-overloaded-tests/vfwcvt_rtz.c index 8e353ba04..7d7dfdfdc 100644 --- a/auto-generated/gnu-overloaded-tests/vfwcvt_rtz.c +++ b/auto-generated/gnu-overloaded-tests/vfwcvt_rtz.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { - return 
__riscv_vfwcvt_rtz_xu(src, vl); +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vs2, vl); } -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } 
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t vm, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t vm, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t vm, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x(vm, vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t vm, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t vm, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t vm, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu(vm, vs2, vl); } /* { 
dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.rtz[ivxfswum.]*\s+} 36 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfwmacc.c b/auto-generated/gnu-overloaded-tests/vfwmacc.c index ee70c10a4..dfbe51eff 100644 --- a/auto-generated/gnu-overloaded-tests/vfwmacc.c +++ b/auto-generated/gnu-overloaded-tests/vfwmacc.c @@ -78,76 +78,76 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t return __riscv_vfwmacc(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, 
vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -222,76 +222,76 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m4 return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_m(vbool64_t 
vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t 
test_vfwmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmacc\.[ivxfswum.]+\s+} 72 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfwmsac.c b/auto-generated/gnu-overloaded-tests/vfwmsac.c index c7467a64d..c7f4a6596 100644 --- a/auto-generated/gnu-overloaded-tests/vfwmsac.c +++ b/auto-generated/gnu-overloaded-tests/vfwmsac.c @@ -78,76 +78,76 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t return __riscv_vfwmsac(vd, vs1, vs2, vl); } -vfloat32mf2_t 
test_vfwmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m1_t 
test_vfwmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -222,76 +222,76 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m4 return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t 
test_vfwmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat64m1_t test_vfwmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmsac\.[ivxfswum.]+\s+} 72 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfwmul.c b/auto-generated/gnu-overloaded-tests/vfwmul.c index f2df40437..80c8fea82 100644 --- a/auto-generated/gnu-overloaded-tests/vfwmul.c +++ b/auto-generated/gnu-overloaded-tests/vfwmul.c @@ -6,292 +6,292 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); 
+vfloat32m1_t test_vfwmul_vv_f32m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, 
vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - 
return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_rm(vfloat16m1_t 
vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_rm(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t 
test_vfwmul_vv_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t 
vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmul\.[ivxfswum.]+\s+} 72 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfwnmacc.c b/auto-generated/gnu-overloaded-tests/vfwnmacc.c index f93eb3183..06b85f8f3 100644 --- a/auto-generated/gnu-overloaded-tests/vfwnmacc.c +++ b/auto-generated/gnu-overloaded-tests/vfwnmacc.c @@ -78,76 +78,76 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t return __riscv_vfwnmacc(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t 
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmacc_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmacc_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmacc_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmacc_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmacc_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmacc_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmacc_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmacc_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl);
 }
 
 vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -222,76 +222,76 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m
   return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmacc\.[ivxfswum.]+\s+} 72 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfwnmsac.c b/auto-generated/gnu-overloaded-tests/vfwnmsac.c
index 169bb1b07..aede690e3 100644
--- a/auto-generated/gnu-overloaded-tests/vfwnmsac.c
+++ b/auto-generated/gnu-overloaded-tests/vfwnmsac.c
@@ -78,76 +78,76 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t vd, float32_t vs1, vfloat32m4_t
   return __riscv_vfwnmsac(vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmsac_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmsac_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmsac_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmsac_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmsac_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmsac_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmsac_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwnmsac_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl);
 }
 
 vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -222,76 +222,76 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_rm(vfloat64m8_t vd, float32_t vs1, vfloat32m
   return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwnmsac(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmsac\.[ivxfswum.]+\s+} 72 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfwredosum.c b/auto-generated/gnu-overloaded-tests/vfwredosum.c
index c1c6cddba..af383c651 100644
--- a/auto-generated/gnu-overloaded-tests/vfwredosum.c
+++ b/auto-generated/gnu-overloaded-tests/vfwredosum.c
@@ -6,180 +6,180 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredosum\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfwredusum.c b/auto-generated/gnu-overloaded-tests/vfwredusum.c
index 9be3e4132..89a64aadf 100644
--- a/auto-generated/gnu-overloaded-tests/vfwredusum.c
+++ b/auto-generated/gnu-overloaded-tests/vfwredusum.c
@@ -6,180 +6,180 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm(vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm(vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm(vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm(vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm(vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm(vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm(vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm(vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm(vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm(vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm(vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_m(vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_m(vbool2_t vm, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum(mask, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredusum\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vfwsub.c b/auto-generated/gnu-overloaded-tests/vfwsub.c
index be153d0ff..7e46c1f8a 100644
--- a/auto-generated/gnu-overloaded-tests/vfwsub.c
+++ b/auto-generated/gnu-overloaded-tests/vfwsub.c
@@ -6,580 +6,580 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv(op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv(vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf(op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2(vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf(vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv(op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv(vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf(op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2(vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf(vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv(op1, op2, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf(op1, op2, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1(vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf(vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv(op1, op2, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv(vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf(op1, op2, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1(vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf(vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv(op1, op2, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv(vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf(op1, op2, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2(vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf(vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv(op1, op2, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv(vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf(op1, op2, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2(vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf(vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv(op1, op2, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv(vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf(op1, op2, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4(vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf(vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv(op1, op2, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv(vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf(op1, op2, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4(vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf(vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv(op1, op2, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv(vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf(op1, op2, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8(vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf(vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv(op1, op2, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv(vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf(op1, op2, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8(vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf(vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv(op1, op2, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf(op1, op2, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf(vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv(op1, op2, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv(vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf(op1, op2, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf(vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv(op1, op2, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv(vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf(op1, op2, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf(vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv(op1, op2, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv(vs2, vs1, vl);
} -vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t 
op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { 
- return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return 
__riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm(vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm(vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm(vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm(vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, 
op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm(vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm(vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm(vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm(vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_rm(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm(vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm(vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm(vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm(vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm(vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm(vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm(vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm(vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t 
test_vfwsub_vv_f32m8_rm(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm(vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm(vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm(vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm(vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm(vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm(vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm(vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm(vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm(vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm(vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm(vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm(vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm(vfloat32m2_t vs2, vfloat32m2_t 
vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm(vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm(vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm(vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm(vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm(vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm(vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm(vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm(vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t 
op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_rm_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { 
+ return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm_m(vbool64_t vm, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, 
op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm_m(vbool32_t vm, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm_m(vbool16_t vm, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf(mask, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm_m(vbool8_t vm, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { 
scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwsub\.[ivxfswum.]+\s+} 144 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle16.c b/auto-generated/gnu-overloaded-tests/vle16.c index 8bb99c4c1..347cdbdce 100644 --- a/auto-generated/gnu-overloaded-tests/vle16.c +++ b/auto-generated/gnu-overloaded-tests/vle16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, const float16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vint16m1_t test_vle16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vint16m2_t test_vle16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vint16m4_t test_vle16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vint16m8_t test_vle16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); 
+vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vuint16m1_t test_vle16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vuint16m2_t test_vle16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vuint16m4_t test_vle16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t vl) { - return __riscv_vle16(mask, base, vl); +vuint16m8_t test_vle16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle16ff.c b/auto-generated/gnu-overloaded-tests/vle16ff.c index 6d2970d9d..30fca7fc4 100644 --- a/auto-generated/gnu-overloaded-tests/vle16ff.c +++ b/auto-generated/gnu-overloaded-tests/vle16ff.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_m(vbool4_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vfloat16m8_t test_vle16ff_v_f16m8_m(vbool2_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vfloat16m8_t 
test_vle16ff_v_f16m8_m(vbool2_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vint16m4_t test_vle16ff_v_i16m4_m(vbool4_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8_m(vbool2_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff(mask, base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } -vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vle16ff(mask, base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16ff\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle32.c b/auto-generated/gnu-overloaded-tests/vle32.c index 60564695f..079c3a398 100644 --- a/auto-generated/gnu-overloaded-tests/vle32.c +++ b/auto-generated/gnu-overloaded-tests/vle32.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, const float32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vint32m1_t test_vle32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vint32m2_t test_vle32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vint32m4_t test_vle32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vint32m8_t test_vle32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vuint32m1_t test_vle32_v_u32m1_m(vbool32_t 
vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vuint32m2_t test_vle32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vuint32m4_t test_vle32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } -vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t vl) { - return __riscv_vle32(mask, base, vl); +vuint32m8_t test_vle32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vle32(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32\.[ivxfswum.]+\s+} 15 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle32ff.c b/auto-generated/gnu-overloaded-tests/vle32ff.c index 68acd8276..f5d26dc77 100644 --- a/auto-generated/gnu-overloaded-tests/vle32ff.c +++ b/auto-generated/gnu-overloaded-tests/vle32ff.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_m(vbool8_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t mask, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_m(vbool4_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vle32ff(mask, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_m(vbool8_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t mask, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_m(vbool4_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } -vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t mask, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff(mask, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32ff\.[ivxfswum.]+\s+} 15 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle64.c b/auto-generated/gnu-overloaded-tests/vle64.c index a41ef43a3..9ccf5c6f0 100644 --- a/auto-generated/gnu-overloaded-tests/vle64.c +++ b/auto-generated/gnu-overloaded-tests/vle64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vfloat64m8_t 
test_vle64_v_f64m8_m(vbool8_t mask, const float64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vint64m1_t test_vle64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vint64m2_t test_vle64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vint64m4_t test_vle64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vint64m8_t test_vle64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vuint64m1_t test_vle64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vuint64m2_t test_vle64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vuint64m4_t test_vle64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } -vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t vl) { - return __riscv_vle64(mask, base, vl); +vuint64m8_t test_vle64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, size_t vl) { + return __riscv_vle64(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle64ff.c b/auto-generated/gnu-overloaded-tests/vle64ff.c index 9e00bf942..33077b3d8 100644 --- a/auto-generated/gnu-overloaded-tests/vle64ff.c +++ b/auto-generated/gnu-overloaded-tests/vle64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4_m(vbool16_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vle64ff(vm, rs1, new_vl, vl); } -vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t mask, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_m(vbool8_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2_m(vbool32_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4_m(vbool16_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t mask, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8_m(vbool8_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vuint64m4_t test_vle64ff_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } -vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t mask, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff(mask, base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle8.c b/auto-generated/gnu-overloaded-tests/vle8.c index 7dc02d6f3..e4c2e520d 100644 --- a/auto-generated/gnu-overloaded-tests/vle8.c +++ b/auto-generated/gnu-overloaded-tests/vle8.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t vl) { - return 
__riscv_vle8(mask, base, vl); +vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vint8m1_t test_vle8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vint8m2_t test_vle8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vint8m4_t test_vle8_v_i8m4_m(vbool2_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vint8m8_t test_vle8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vuint8m1_t test_vle8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vuint8m2_t test_vle8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vuint8m4_t test_vle8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t vl) { - return __riscv_vle8(mask, base, vl); +vuint8m8_t test_vle8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, size_t vl) { + return __riscv_vle8(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vle8ff.c b/auto-generated/gnu-overloaded-tests/vle8ff.c index 3b7b4f817..841e14135 100644 --- a/auto-generated/gnu-overloaded-tests/vle8ff.c +++ b/auto-generated/gnu-overloaded-tests/vle8ff.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vle8ff(mask, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_m(vbool2_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_m(vbool1_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vuint8m4_t 
test_vle8ff_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff(mask, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8ff\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlmul_ext_v.c b/auto-generated/gnu-overloaded-tests/vlmul_ext_v.c index 9e8366f04..37092a9cc 100644 --- a/auto-generated/gnu-overloaded-tests/vlmul_ext_v.c +++ b/auto-generated/gnu-overloaded-tests/vlmul_ext_v.c @@ -6,544 +6,544 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_f16mf2(op1); +vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t value) { + return __riscv_vlmul_ext_f16mf2(value); } -vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_f16m1(op1); +vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t value) { + return __riscv_vlmul_ext_f16m1(value); } -vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_f16m2(op1); +vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t value) { + return __riscv_vlmul_ext_f16m2(value); } -vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t value) { + return __riscv_vlmul_ext_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) { - return __riscv_vlmul_ext_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t value) { + return __riscv_vlmul_ext_f16m8(value); } -vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_f16m1(op1); +vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t value) { + return __riscv_vlmul_ext_f16m1(value); } -vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_f16m2(op1); +vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t value) { + return __riscv_vlmul_ext_f16m2(value); } -vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t value) { + return __riscv_vlmul_ext_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) { - return __riscv_vlmul_ext_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t value) { + return __riscv_vlmul_ext_f16m8(value); } -vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) { - return __riscv_vlmul_ext_f16m2(op1); +vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t value) { + return __riscv_vlmul_ext_f16m2(value); } -vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) { - return __riscv_vlmul_ext_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t value) { + return __riscv_vlmul_ext_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t op1) { - return __riscv_vlmul_ext_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t value) { + return __riscv_vlmul_ext_f16m8(value); } -vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t 
op1) { - return __riscv_vlmul_ext_f16m4(op1); +vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t value) { + return __riscv_vlmul_ext_f16m4(value); } -vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) { - return __riscv_vlmul_ext_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t value) { + return __riscv_vlmul_ext_f16m8(value); } -vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) { - return __riscv_vlmul_ext_f16m8(op1); +vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t value) { + return __riscv_vlmul_ext_f16m8(value); } -vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) { - return __riscv_vlmul_ext_f32m1(op1); +vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t value) { + return __riscv_vlmul_ext_f32m1(value); } -vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) { - return __riscv_vlmul_ext_f32m2(op1); +vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t value) { + return __riscv_vlmul_ext_f32m2(value); } -vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) { - return __riscv_vlmul_ext_f32m4(op1); +vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t value) { + return __riscv_vlmul_ext_f32m4(value); } -vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) { - return __riscv_vlmul_ext_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t value) { + return __riscv_vlmul_ext_f32m8(value); } -vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) { - return __riscv_vlmul_ext_f32m2(op1); +vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t value) { + return __riscv_vlmul_ext_f32m2(value); } -vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) { - return __riscv_vlmul_ext_f32m4(op1); +vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t value) { + return __riscv_vlmul_ext_f32m4(value); } -vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) { - return __riscv_vlmul_ext_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t value) { + return __riscv_vlmul_ext_f32m8(value); } -vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) { - return __riscv_vlmul_ext_f32m4(op1); +vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t value) { + return __riscv_vlmul_ext_f32m4(value); } -vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) { - return __riscv_vlmul_ext_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t value) { + return __riscv_vlmul_ext_f32m8(value); } -vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) { - return __riscv_vlmul_ext_f32m8(op1); +vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t value) { + return __riscv_vlmul_ext_f32m8(value); } -vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) { - return __riscv_vlmul_ext_f64m2(op1); +vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t value) { + return __riscv_vlmul_ext_f64m2(value); } -vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) { - return __riscv_vlmul_ext_f64m4(op1); +vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t value) { + return __riscv_vlmul_ext_f64m4(value); } -vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) { - return __riscv_vlmul_ext_f64m8(op1); +vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t value) { + return __riscv_vlmul_ext_f64m8(value); } -vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) { - return __riscv_vlmul_ext_f64m4(op1); +vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t value) { + return __riscv_vlmul_ext_f64m4(value); } -vfloat64m8_t 
test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) { - return __riscv_vlmul_ext_f64m8(op1); +vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t value) { + return __riscv_vlmul_ext_f64m8(value); } -vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) { - return __riscv_vlmul_ext_f64m8(op1); +vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t value) { + return __riscv_vlmul_ext_f64m8(value); } -vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { - return __riscv_vlmul_ext_i8mf4(op1); +vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t value) { + return __riscv_vlmul_ext_i8mf4(value); } -vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { - return __riscv_vlmul_ext_i8mf2(op1); +vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t value) { + return __riscv_vlmul_ext_i8mf2(value); } -vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { - return __riscv_vlmul_ext_i8m1(op1); +vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t value) { + return __riscv_vlmul_ext_i8m1(value); } -vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { - return __riscv_vlmul_ext_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t value) { + return __riscv_vlmul_ext_i8m2(value); } -vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { - return __riscv_vlmul_ext_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t value) { + return __riscv_vlmul_ext_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { - return __riscv_vlmul_ext_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t value) { + return __riscv_vlmul_ext_i8m8(value); } -vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { - return __riscv_vlmul_ext_i8mf2(op1); +vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t value) { + return __riscv_vlmul_ext_i8mf2(value); } -vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { - return __riscv_vlmul_ext_i8m1(op1); +vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t value) { + return __riscv_vlmul_ext_i8m1(value); } -vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { - return __riscv_vlmul_ext_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t value) { + return __riscv_vlmul_ext_i8m2(value); } -vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { - return __riscv_vlmul_ext_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t value) { + return __riscv_vlmul_ext_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { - return __riscv_vlmul_ext_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t value) { + return __riscv_vlmul_ext_i8m8(value); } -vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { - return __riscv_vlmul_ext_i8m1(op1); +vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t value) { + return __riscv_vlmul_ext_i8m1(value); } -vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { - return __riscv_vlmul_ext_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t value) { + return __riscv_vlmul_ext_i8m2(value); } -vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { - return __riscv_vlmul_ext_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t value) { + return __riscv_vlmul_ext_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { - return __riscv_vlmul_ext_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t value) { + return __riscv_vlmul_ext_i8m8(value); } -vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { - return __riscv_vlmul_ext_i8m2(op1); +vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t value) { + return __riscv_vlmul_ext_i8m2(value); 
} -vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { - return __riscv_vlmul_ext_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t value) { + return __riscv_vlmul_ext_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { - return __riscv_vlmul_ext_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t value) { + return __riscv_vlmul_ext_i8m8(value); } -vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { - return __riscv_vlmul_ext_i8m4(op1); +vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t value) { + return __riscv_vlmul_ext_i8m4(value); } -vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { - return __riscv_vlmul_ext_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t value) { + return __riscv_vlmul_ext_i8m8(value); } -vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { - return __riscv_vlmul_ext_i8m8(op1); +vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t value) { + return __riscv_vlmul_ext_i8m8(value); } -vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { - return __riscv_vlmul_ext_i16mf2(op1); +vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t value) { + return __riscv_vlmul_ext_i16mf2(value); } -vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { - return __riscv_vlmul_ext_i16m1(op1); +vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t value) { + return __riscv_vlmul_ext_i16m1(value); } -vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { - return __riscv_vlmul_ext_i16m2(op1); +vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t value) { + return __riscv_vlmul_ext_i16m2(value); } -vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { - return __riscv_vlmul_ext_i16m4(op1); +vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t value) { + return __riscv_vlmul_ext_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { - return __riscv_vlmul_ext_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t value) { + return __riscv_vlmul_ext_i16m8(value); } -vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { - return __riscv_vlmul_ext_i16m1(op1); +vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t value) { + return __riscv_vlmul_ext_i16m1(value); } -vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { - return __riscv_vlmul_ext_i16m2(op1); +vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t value) { + return __riscv_vlmul_ext_i16m2(value); } -vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { - return __riscv_vlmul_ext_i16m4(op1); +vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t value) { + return __riscv_vlmul_ext_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { - return __riscv_vlmul_ext_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t value) { + return __riscv_vlmul_ext_i16m8(value); } -vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { - return __riscv_vlmul_ext_i16m2(op1); +vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t value) { + return __riscv_vlmul_ext_i16m2(value); } -vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { - return __riscv_vlmul_ext_i16m4(op1); +vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t value) { + return __riscv_vlmul_ext_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { - return __riscv_vlmul_ext_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t value) { + return __riscv_vlmul_ext_i16m8(value); } -vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { - return __riscv_vlmul_ext_i16m4(op1); +vint16m4_t 
test_vlmul_ext_v_i16m2_i16m4(vint16m2_t value) { + return __riscv_vlmul_ext_i16m4(value); } -vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { - return __riscv_vlmul_ext_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t value) { + return __riscv_vlmul_ext_i16m8(value); } -vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { - return __riscv_vlmul_ext_i16m8(op1); +vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t value) { + return __riscv_vlmul_ext_i16m8(value); } -vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { - return __riscv_vlmul_ext_i32m1(op1); +vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t value) { + return __riscv_vlmul_ext_i32m1(value); } -vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { - return __riscv_vlmul_ext_i32m2(op1); +vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t value) { + return __riscv_vlmul_ext_i32m2(value); } -vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { - return __riscv_vlmul_ext_i32m4(op1); +vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t value) { + return __riscv_vlmul_ext_i32m4(value); } -vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { - return __riscv_vlmul_ext_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t value) { + return __riscv_vlmul_ext_i32m8(value); } -vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { - return __riscv_vlmul_ext_i32m2(op1); +vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t value) { + return __riscv_vlmul_ext_i32m2(value); } -vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { - return __riscv_vlmul_ext_i32m4(op1); +vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t value) { + return __riscv_vlmul_ext_i32m4(value); } -vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { - return __riscv_vlmul_ext_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t value) { + return __riscv_vlmul_ext_i32m8(value); } -vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { - return __riscv_vlmul_ext_i32m4(op1); +vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t value) { + return __riscv_vlmul_ext_i32m4(value); } -vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { - return __riscv_vlmul_ext_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t value) { + return __riscv_vlmul_ext_i32m8(value); } -vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { - return __riscv_vlmul_ext_i32m8(op1); +vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t value) { + return __riscv_vlmul_ext_i32m8(value); } -vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { - return __riscv_vlmul_ext_i64m2(op1); +vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t value) { + return __riscv_vlmul_ext_i64m2(value); } -vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { - return __riscv_vlmul_ext_i64m4(op1); +vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t value) { + return __riscv_vlmul_ext_i64m4(value); } -vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { - return __riscv_vlmul_ext_i64m8(op1); +vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t value) { + return __riscv_vlmul_ext_i64m8(value); } -vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { - return __riscv_vlmul_ext_i64m4(op1); +vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t value) { + return __riscv_vlmul_ext_i64m4(value); } -vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { - return __riscv_vlmul_ext_i64m8(op1); +vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t value) { + return __riscv_vlmul_ext_i64m8(value); } -vint64m8_t 
test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { - return __riscv_vlmul_ext_i64m8(op1); +vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t value) { + return __riscv_vlmul_ext_i64m8(value); } -vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { - return __riscv_vlmul_ext_u8mf4(op1); +vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t value) { + return __riscv_vlmul_ext_u8mf4(value); } -vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { - return __riscv_vlmul_ext_u8mf2(op1); +vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t value) { + return __riscv_vlmul_ext_u8mf2(value); } -vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { - return __riscv_vlmul_ext_u8m1(op1); +vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t value) { + return __riscv_vlmul_ext_u8m1(value); } -vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { - return __riscv_vlmul_ext_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t value) { + return __riscv_vlmul_ext_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { - return __riscv_vlmul_ext_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t value) { + return __riscv_vlmul_ext_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { - return __riscv_vlmul_ext_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t value) { + return __riscv_vlmul_ext_u8m8(value); } -vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { - return __riscv_vlmul_ext_u8mf2(op1); +vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t value) { + return __riscv_vlmul_ext_u8mf2(value); } -vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { - return __riscv_vlmul_ext_u8m1(op1); +vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t value) { + return __riscv_vlmul_ext_u8m1(value); } -vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { - return __riscv_vlmul_ext_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t value) { + return __riscv_vlmul_ext_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { - return __riscv_vlmul_ext_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t value) { + return __riscv_vlmul_ext_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { - return __riscv_vlmul_ext_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t value) { + return __riscv_vlmul_ext_u8m8(value); } -vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { - return __riscv_vlmul_ext_u8m1(op1); +vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t value) { + return __riscv_vlmul_ext_u8m1(value); } -vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { - return __riscv_vlmul_ext_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t value) { + return __riscv_vlmul_ext_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { - return __riscv_vlmul_ext_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t value) { + return __riscv_vlmul_ext_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { - return __riscv_vlmul_ext_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t value) { + return __riscv_vlmul_ext_u8m8(value); } -vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { - return __riscv_vlmul_ext_u8m2(op1); +vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t value) { + return __riscv_vlmul_ext_u8m2(value); } -vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { - return __riscv_vlmul_ext_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t 
value) { + return __riscv_vlmul_ext_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { - return __riscv_vlmul_ext_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t value) { + return __riscv_vlmul_ext_u8m8(value); } -vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { - return __riscv_vlmul_ext_u8m4(op1); +vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t value) { + return __riscv_vlmul_ext_u8m4(value); } -vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { - return __riscv_vlmul_ext_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t value) { + return __riscv_vlmul_ext_u8m8(value); } -vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { - return __riscv_vlmul_ext_u8m8(op1); +vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t value) { + return __riscv_vlmul_ext_u8m8(value); } -vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { - return __riscv_vlmul_ext_u16mf2(op1); +vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t value) { + return __riscv_vlmul_ext_u16mf2(value); } -vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { - return __riscv_vlmul_ext_u16m1(op1); +vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t value) { + return __riscv_vlmul_ext_u16m1(value); } -vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { - return __riscv_vlmul_ext_u16m2(op1); +vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t value) { + return __riscv_vlmul_ext_u16m2(value); } -vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { - return __riscv_vlmul_ext_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t value) { + return __riscv_vlmul_ext_u16m4(value); } -vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { - return __riscv_vlmul_ext_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t value) { + return __riscv_vlmul_ext_u16m8(value); } -vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { - return __riscv_vlmul_ext_u16m1(op1); +vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t value) { + return __riscv_vlmul_ext_u16m1(value); } -vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { - return __riscv_vlmul_ext_u16m2(op1); +vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t value) { + return __riscv_vlmul_ext_u16m2(value); } -vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { - return __riscv_vlmul_ext_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t value) { + return __riscv_vlmul_ext_u16m4(value); } -vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { - return __riscv_vlmul_ext_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t value) { + return __riscv_vlmul_ext_u16m8(value); } -vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { - return __riscv_vlmul_ext_u16m2(op1); +vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t value) { + return __riscv_vlmul_ext_u16m2(value); } -vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { - return __riscv_vlmul_ext_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t value) { + return __riscv_vlmul_ext_u16m4(value); } -vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { - return __riscv_vlmul_ext_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t value) { + return __riscv_vlmul_ext_u16m8(value); } -vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { - return __riscv_vlmul_ext_u16m4(op1); +vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t value) { + return __riscv_vlmul_ext_u16m4(value); } 
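[Editor note — not part of the generated diff: a minimal sketch of why the __riscv_vlmul_ext_* casts above exist, assuming <riscv_vector.h> and an RVV 1.0 toolchain; the helper name add_widened and the pairing with the overloaded __riscv_vadd are illustrative, not from this patch.

#include <riscv_vector.h>

/* Reinterpret an m1 register group as m2 so it can be combined with m2 data.
   vlmul_ext leaves elements beyond the source group unspecified, so the
   caller must keep vl within the element count that is valid in `narrow`. */
vuint8m2_t add_widened(vuint8m1_t narrow, vuint8m2_t wide, size_t vl) {
  vuint8m2_t extended = __riscv_vlmul_ext_u8m2(narrow); /* overloaded, as above */
  return __riscv_vadd(extended, wide, vl);
}
]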
-vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { - return __riscv_vlmul_ext_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t value) { + return __riscv_vlmul_ext_u16m8(value); } -vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { - return __riscv_vlmul_ext_u16m8(op1); +vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t value) { + return __riscv_vlmul_ext_u16m8(value); } -vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { - return __riscv_vlmul_ext_u32m1(op1); +vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t value) { + return __riscv_vlmul_ext_u32m1(value); } -vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { - return __riscv_vlmul_ext_u32m2(op1); +vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t value) { + return __riscv_vlmul_ext_u32m2(value); } -vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { - return __riscv_vlmul_ext_u32m4(op1); +vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t value) { + return __riscv_vlmul_ext_u32m4(value); } -vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { - return __riscv_vlmul_ext_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t value) { + return __riscv_vlmul_ext_u32m8(value); } -vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { - return __riscv_vlmul_ext_u32m2(op1); +vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t value) { + return __riscv_vlmul_ext_u32m2(value); } -vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { - return __riscv_vlmul_ext_u32m4(op1); +vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t value) { + return __riscv_vlmul_ext_u32m4(value); } -vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) { - return __riscv_vlmul_ext_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t value) { + return __riscv_vlmul_ext_u32m8(value); } -vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { - return __riscv_vlmul_ext_u32m4(op1); +vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t value) { + return __riscv_vlmul_ext_u32m4(value); } -vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { - return __riscv_vlmul_ext_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t value) { + return __riscv_vlmul_ext_u32m8(value); } -vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { - return __riscv_vlmul_ext_u32m8(op1); +vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t value) { + return __riscv_vlmul_ext_u32m8(value); } -vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { - return __riscv_vlmul_ext_u64m2(op1); +vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t value) { + return __riscv_vlmul_ext_u64m2(value); } -vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { - return __riscv_vlmul_ext_u64m4(op1); +vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t value) { + return __riscv_vlmul_ext_u64m4(value); } -vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { - return __riscv_vlmul_ext_u64m8(op1); +vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t value) { + return __riscv_vlmul_ext_u64m8(value); } -vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { - return __riscv_vlmul_ext_u64m4(op1); +vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t value) { + return __riscv_vlmul_ext_u64m4(value); } -vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { - return __riscv_vlmul_ext_u64m8(op1); +vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t value) { + return __riscv_vlmul_ext_u64m8(value); } -vuint64m8_t 
test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) { - return __riscv_vlmul_ext_u64m8(op1); +vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t value) { + return __riscv_vlmul_ext_u64m8(value); } /* { dg-final { scan-assembler-times {vs[1248e][r123468]+\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 135 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlmul_trunc_v.c b/auto-generated/gnu-overloaded-tests/vlmul_trunc_v.c index 0e93b3bd3..4b70e0dda 100644 --- a/auto-generated/gnu-overloaded-tests/vlmul_trunc_v.c +++ b/auto-generated/gnu-overloaded-tests/vlmul_trunc_v.c @@ -6,544 +6,544 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t value) { + return __riscv_vlmul_trunc_f16mf4(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t value) { + return __riscv_vlmul_trunc_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t value) { + return __riscv_vlmul_trunc_f16mf2(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t value) { + return __riscv_vlmul_trunc_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t value) { + return __riscv_vlmul_trunc_f16mf2(value); } -vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_f16m1(op1); +vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t value) { + return __riscv_vlmul_trunc_f16m1(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t value) { + return __riscv_vlmul_trunc_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t value) { + return __riscv_vlmul_trunc_f16mf2(value); } -vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16m1(op1); +vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t value) { + return __riscv_vlmul_trunc_f16m1(value); } -vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16m2(op1); +vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t value) { + return __riscv_vlmul_trunc_f16m2(value); } -vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); +vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t value) { + return __riscv_vlmul_trunc_f16mf4(value); } -vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); +vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t value) { + return __riscv_vlmul_trunc_f16mf2(value); } -vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16m1(op1); +vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t value) { + return __riscv_vlmul_trunc_f16m1(value); } 
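[Editor note — a hedged sketch of the truncation direction exercised by the __riscv_vlmul_trunc_* tests above, under the same assumptions; first_group is an illustrative name, not from this patch.

#include <riscv_vector.h>

/* Keep only the lowest m1 part of an m8 register group, e.g. to hand the
   first register of a wide accumulation to a single-register consumer. */
vint16m1_t first_group(vint16m8_t big) {
  return __riscv_vlmul_trunc_i16m1(big);
}
]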
-vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16m2(op1); +vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t value) { + return __riscv_vlmul_trunc_f16m2(value); } -vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16m4(op1); +vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t value) { + return __riscv_vlmul_trunc_f16m4(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t value) { + return __riscv_vlmul_trunc_f32mf2(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t value) { + return __riscv_vlmul_trunc_f32mf2(value); } -vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_f32m1(op1); +vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t value) { + return __riscv_vlmul_trunc_f32m1(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t value) { + return __riscv_vlmul_trunc_f32mf2(value); } -vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_f32m1(op1); +vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t value) { + return __riscv_vlmul_trunc_f32m1(value); } -vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_f32m2(op1); +vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t value) { + return __riscv_vlmul_trunc_f32m2(value); } -vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); +vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t value) { + return __riscv_vlmul_trunc_f32mf2(value); } -vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32m1(op1); +vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t value) { + return __riscv_vlmul_trunc_f32m1(value); } -vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32m2(op1); +vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t value) { + return __riscv_vlmul_trunc_f32m2(value); } -vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32m4(op1); +vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t value) { + return __riscv_vlmul_trunc_f32m4(value); } -vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { - return __riscv_vlmul_trunc_f64m1(op1); +vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t value) { + return __riscv_vlmul_trunc_f64m1(value); } -vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_f64m1(op1); +vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t value) { + return __riscv_vlmul_trunc_f64m1(value); } -vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_f64m2(op1); +vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t value) { + return __riscv_vlmul_trunc_f64m2(value); } -vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_f64m1(op1); +vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t value) { + return __riscv_vlmul_trunc_f64m1(value); } -vfloat64m2_t 
test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_f64m2(op1); +vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t value) { + return __riscv_vlmul_trunc_f64m2(value); } -vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_f64m4(op1); +vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t value) { + return __riscv_vlmul_trunc_f64m4(value); } -vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t value) { + return __riscv_vlmul_trunc_i8mf8(value); } -vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t value) { + return __riscv_vlmul_trunc_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t value) { + return __riscv_vlmul_trunc_i8mf4(value); } -vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t value) { + return __riscv_vlmul_trunc_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t value) { + return __riscv_vlmul_trunc_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t value) { + return __riscv_vlmul_trunc_i8mf2(value); } -vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t value) { + return __riscv_vlmul_trunc_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t value) { + return __riscv_vlmul_trunc_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t value) { + return __riscv_vlmul_trunc_i8mf2(value); } -vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8m1(op1); +vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t value) { + return __riscv_vlmul_trunc_i8m1(value); } -vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t value) { + return __riscv_vlmul_trunc_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t value) { + return __riscv_vlmul_trunc_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t value) { + return __riscv_vlmul_trunc_i8mf2(value); } -vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8m1(op1); +vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t value) { + return __riscv_vlmul_trunc_i8m1(value); } -vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8m2(op1); +vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t value) { + return __riscv_vlmul_trunc_i8m2(value); } -vint8mf8_t 
test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); +vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t value) { + return __riscv_vlmul_trunc_i8mf8(value); } -vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); +vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t value) { + return __riscv_vlmul_trunc_i8mf4(value); } -vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); +vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t value) { + return __riscv_vlmul_trunc_i8mf2(value); } -vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8m1(op1); +vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t value) { + return __riscv_vlmul_trunc_i8m1(value); } -vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8m2(op1); +vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t value) { + return __riscv_vlmul_trunc_i8m2(value); } -vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8m4(op1); +vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t value) { + return __riscv_vlmul_trunc_i8m4(value); } -vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); +vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t value) { + return __riscv_vlmul_trunc_i16mf4(value); } -vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); +vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t value) { + return __riscv_vlmul_trunc_i16mf4(value); } -vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); +vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t value) { + return __riscv_vlmul_trunc_i16mf2(value); } -vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); +vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t value) { + return __riscv_vlmul_trunc_i16mf4(value); } -vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); +vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t value) { + return __riscv_vlmul_trunc_i16mf2(value); } -vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { - return __riscv_vlmul_trunc_i16m1(op1); +vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t value) { + return __riscv_vlmul_trunc_i16m1(value); } -vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); +vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t value) { + return __riscv_vlmul_trunc_i16mf4(value); } -vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); +vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t value) { + return __riscv_vlmul_trunc_i16mf2(value); } -vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16m1(op1); +vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t value) { + return __riscv_vlmul_trunc_i16m1(value); } -vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16m2(op1); +vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t value) { + return __riscv_vlmul_trunc_i16m2(value); } -vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); +vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t value) { + return 
__riscv_vlmul_trunc_i16mf4(value); } -vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); +vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t value) { + return __riscv_vlmul_trunc_i16mf2(value); } -vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16m1(op1); +vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t value) { + return __riscv_vlmul_trunc_i16m1(value); } -vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16m2(op1); +vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t value) { + return __riscv_vlmul_trunc_i16m2(value); } -vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16m4(op1); +vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t value) { + return __riscv_vlmul_trunc_i16m4(value); } -vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { - return __riscv_vlmul_trunc_i32mf2(op1); +vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t value) { + return __riscv_vlmul_trunc_i32mf2(value); } -vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { - return __riscv_vlmul_trunc_i32mf2(op1); +vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t value) { + return __riscv_vlmul_trunc_i32mf2(value); } -vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { - return __riscv_vlmul_trunc_i32m1(op1); +vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t value) { + return __riscv_vlmul_trunc_i32m1(value); } -vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { - return __riscv_vlmul_trunc_i32mf2(op1); +vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t value) { + return __riscv_vlmul_trunc_i32mf2(value); } -vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { - return __riscv_vlmul_trunc_i32m1(op1); +vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t value) { + return __riscv_vlmul_trunc_i32m1(value); } -vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { - return __riscv_vlmul_trunc_i32m2(op1); +vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t value) { + return __riscv_vlmul_trunc_i32m2(value); } -vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32mf2(op1); +vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t value) { + return __riscv_vlmul_trunc_i32mf2(value); } -vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32m1(op1); +vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t value) { + return __riscv_vlmul_trunc_i32m1(value); } -vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32m2(op1); +vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t value) { + return __riscv_vlmul_trunc_i32m2(value); } -vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32m4(op1); +vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t value) { + return __riscv_vlmul_trunc_i32m4(value); } -vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { - return __riscv_vlmul_trunc_i64m1(op1); +vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t value) { + return __riscv_vlmul_trunc_i64m1(value); } -vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { - return __riscv_vlmul_trunc_i64m1(op1); +vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t value) { + return __riscv_vlmul_trunc_i64m1(value); } -vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { - return __riscv_vlmul_trunc_i64m2(op1); +vint64m2_t 
test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t value) { + return __riscv_vlmul_trunc_i64m2(value); } -vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { - return __riscv_vlmul_trunc_i64m1(op1); +vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t value) { + return __riscv_vlmul_trunc_i64m1(value); } -vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { - return __riscv_vlmul_trunc_i64m2(op1); +vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t value) { + return __riscv_vlmul_trunc_i64m2(value); } -vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { - return __riscv_vlmul_trunc_i64m4(op1); +vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t value) { + return __riscv_vlmul_trunc_i64m4(value); } -vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); +vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t value) { + return __riscv_vlmul_trunc_u8mf8(value); } -vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); +vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t value) { + return __riscv_vlmul_trunc_u8mf8(value); } -vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); +vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t value) { + return __riscv_vlmul_trunc_u8mf4(value); } -vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); +vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t value) { + return __riscv_vlmul_trunc_u8mf8(value); } -vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); +vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t value) { + return __riscv_vlmul_trunc_u8mf4(value); } -vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); +vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t value) { + return __riscv_vlmul_trunc_u8mf2(value); } -vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); +vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t value) { + return __riscv_vlmul_trunc_u8mf8(value); } -vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); +vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t value) { + return __riscv_vlmul_trunc_u8mf4(value); } -vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); +vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t value) { + return __riscv_vlmul_trunc_u8mf2(value); } -vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8m1(op1); +vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t value) { + return __riscv_vlmul_trunc_u8m1(value); } -vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); +vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t value) { + return __riscv_vlmul_trunc_u8mf8(value); } -vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); +vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t value) { + return __riscv_vlmul_trunc_u8mf4(value); } -vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); +vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t value) { + return __riscv_vlmul_trunc_u8mf2(value); } -vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { - return 
__riscv_vlmul_trunc_u8m1(op1); +vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t value) { + return __riscv_vlmul_trunc_u8m1(value); } -vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8m2(op1); +vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t value) { + return __riscv_vlmul_trunc_u8m2(value); } -vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); +vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t value) { + return __riscv_vlmul_trunc_u8mf8(value); } -vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); +vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t value) { + return __riscv_vlmul_trunc_u8mf4(value); } -vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); +vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t value) { + return __riscv_vlmul_trunc_u8mf2(value); } -vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8m1(op1); +vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t value) { + return __riscv_vlmul_trunc_u8m1(value); } -vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8m2(op1); +vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t value) { + return __riscv_vlmul_trunc_u8m2(value); } -vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8m4(op1); +vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t value) { + return __riscv_vlmul_trunc_u8m4(value); } -vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); +vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t value) { + return __riscv_vlmul_trunc_u16mf4(value); } -vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); +vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t value) { + return __riscv_vlmul_trunc_u16mf4(value); } -vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); +vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t value) { + return __riscv_vlmul_trunc_u16mf2(value); } -vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); +vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t value) { + return __riscv_vlmul_trunc_u16mf4(value); } -vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); +vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t value) { + return __riscv_vlmul_trunc_u16mf2(value); } -vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { - return __riscv_vlmul_trunc_u16m1(op1); +vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t value) { + return __riscv_vlmul_trunc_u16m1(value); } -vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); +vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t value) { + return __riscv_vlmul_trunc_u16mf4(value); } -vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); +vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t value) { + return __riscv_vlmul_trunc_u16mf2(value); } -vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16m1(op1); +vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t value) { + return __riscv_vlmul_trunc_u16m1(value); } 
-vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16m2(op1); +vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t value) { + return __riscv_vlmul_trunc_u16m2(value); } -vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); +vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t value) { + return __riscv_vlmul_trunc_u16mf4(value); } -vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); +vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t value) { + return __riscv_vlmul_trunc_u16mf2(value); } -vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16m1(op1); +vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t value) { + return __riscv_vlmul_trunc_u16m1(value); } -vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16m2(op1); +vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t value) { + return __riscv_vlmul_trunc_u16m2(value); } -vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16m4(op1); +vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t value) { + return __riscv_vlmul_trunc_u16m4(value); } -vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); +vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t value) { + return __riscv_vlmul_trunc_u32mf2(value); } -vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); +vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t value) { + return __riscv_vlmul_trunc_u32mf2(value); } -vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { - return __riscv_vlmul_trunc_u32m1(op1); +vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t value) { + return __riscv_vlmul_trunc_u32m1(value); } -vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); +vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t value) { + return __riscv_vlmul_trunc_u32mf2(value); } -vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { - return __riscv_vlmul_trunc_u32m1(op1); +vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t value) { + return __riscv_vlmul_trunc_u32m1(value); } -vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { - return __riscv_vlmul_trunc_u32m2(op1); +vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t value) { + return __riscv_vlmul_trunc_u32m2(value); } -vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); +vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t value) { + return __riscv_vlmul_trunc_u32mf2(value); } -vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32m1(op1); +vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t value) { + return __riscv_vlmul_trunc_u32m1(value); } -vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32m2(op1); +vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t value) { + return __riscv_vlmul_trunc_u32m2(value); } -vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32m4(op1); +vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t value) { + return __riscv_vlmul_trunc_u32m4(value); } -vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { - return 
__riscv_vlmul_trunc_u64m1(op1);
+vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t value) {
+  return __riscv_vlmul_trunc_u64m1(value);
 }
 
-vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
-  return __riscv_vlmul_trunc_u64m1(op1);
+vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t value) {
+  return __riscv_vlmul_trunc_u64m1(value);
 }
 
-vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
-  return __riscv_vlmul_trunc_u64m2(op1);
+vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t value) {
+  return __riscv_vlmul_trunc_u64m2(value);
 }
 
-vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_u64m1(op1);
+vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t value) {
+  return __riscv_vlmul_trunc_u64m1(value);
 }
 
-vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_u64m2(op1);
+vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t value) {
+  return __riscv_vlmul_trunc_u64m2(value);
 }
 
-vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_u64m4(op1);
+vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t value) {
+  return __riscv_vlmul_trunc_u64m4(value);
 }
 
 /* { dg-final { scan-assembler-times {vs[1248e][r123468]+\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 135 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxei16.c b/auto-generated/gnu-overloaded-tests/vloxei16.c
index 27fbaa6cd..fe3cc5320 100644
--- a/auto-generated/gnu-overloaded-tests/vloxei16.c
+++ b/auto-generated/gnu-overloaded-tests/vloxei16.c
@@ -6,460 +6,460 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vloxei16_v_f16mf4(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16(base, bindex, vl);
+vfloat16mf4_t test_vloxei16_v_f16mf4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16(rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei16_v_f16mf2(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16(base, bindex, vl);
+vfloat16mf2_t test_vloxei16_v_f16mf2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16(rs1, rs2, vl);
 }
 
-vfloat16m1_t test_vloxei16_v_f16m1(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16(base, bindex, vl);
+vfloat16m1_t test_vloxei16_v_f16m1(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16(rs1, rs2, vl);
 }
 
-vfloat16m2_t test_vloxei16_v_f16m2(const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16(base, bindex, vl);
+vfloat16m2_t test_vloxei16_v_f16m2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16(rs1, rs2, vl);
 }
 
-vfloat16m4_t test_vloxei16_v_f16m4(const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16(base, bindex, vl);
+vfloat16m4_t test_vloxei16_v_f16m4(const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16(rs1, rs2, vl);
 }
 
-vfloat16m8_t test_vloxei16_v_f16m8(const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16(base, bindex, vl);
+vfloat16m8_t test_vloxei16_v_f16m8(const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16(rs1, rs2, vl);
 }
 
-vfloat32mf2_t test_vloxei16_v_f32mf2(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16(base, bindex, vl);
+vfloat32mf2_t test_vloxei16_v_f32mf2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16(rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei16_v_f32m1(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4(const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4(const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8(const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8(const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4(const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat64m4_t test_vloxei16_v_f64m4(const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8(const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8(const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(base, 
bindex, vl); +vint8m4_t test_vloxei16_v_i8m4(const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4(const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8(const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4(const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8(const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint64m4_t 
test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint64m4_t test_vloxei16_v_i64m4(const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8(const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint16m8_t 
test_vloxei16_v_u16m8(const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(base, bindex, vl); +vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); } -vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t 
vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t vm, const 
float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return 
__riscv_vloxei16(mask, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } 
-vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16(mask, base, bindex, vl); +vuint32m1_t 
test_vloxei16_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
-vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16(mask, base, bindex, vl);
+vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
-vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16(mask, base, bindex, vl);
+vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
-vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16(mask, base, bindex, vl);
+vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
-vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16(mask, base, bindex, vl);
+vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
-vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16(mask, base, bindex, vl);
+vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
-vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16(mask, base, bindex, vl);
+vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
-vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16(mask, base, bindex, vl);
+vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei16\.[ivxfswum.]+\s+} 114 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxei32.c b/auto-generated/gnu-overloaded-tests/vloxei32.c
index 499721535..ebba62ccd 100644
--- a/auto-generated/gnu-overloaded-tests/vloxei32.c
+++ b/auto-generated/gnu-overloaded-tests/vloxei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vloxei32_v_f16mf4(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(base, bindex, vl);
+vfloat16mf4_t test_vloxei32_v_f16mf4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei32_v_f16mf2(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(base, bindex, vl);
+vfloat16mf2_t test_vloxei32_v_f16mf2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(rs1, rs2, vl);
 }
 
-vfloat16m1_t test_vloxei32_v_f16m1(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(base, bindex, vl);
+vfloat16m1_t test_vloxei32_v_f16m1(const float16_t *rs1, vuint32m2_t rs2,
size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4(const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4(const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4(const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4(const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8(const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8(const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4(const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4(const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8(const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8(const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint8mf2_t 
test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4(const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4(const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8(const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1(const int64_t 
*rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4(const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8(const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, 
vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32(rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32(mask, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32(mask, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32(vm, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32(mask, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32(vm, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32(mask, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return 
-vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat16m4_t test_vloxei32_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32(mask, base, bindex, vl);
+vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxei64.c b/auto-generated/gnu-overloaded-tests/vloxei64.c
index fb7325423..7edf89032 100644
--- a/auto-generated/gnu-overloaded-tests/vloxei64.c
+++ b/auto-generated/gnu-overloaded-tests/vloxei64.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vloxei64_v_f16mf4(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2(const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2(const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4(const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4(const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2(const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4(const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4(const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8(const float64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8(const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4(const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4(const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8(const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64(mask, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei64\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxei8.c b/auto-generated/gnu-overloaded-tests/vloxei8.c
index 91b20a584..2ebf5b744 100644
--- a/auto-generated/gnu-overloaded-tests/vloxei8.c
+++ b/auto-generated/gnu-overloaded-tests/vloxei8.c
@@ -6,476 +6,476 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vloxei8_v_f16mf4(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei8_v_f16mf2(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat16mf2_t test_vloxei8_v_f16mf2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei8_v_f16m1(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat16m1_t test_vloxei8_v_f16m1(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei8_v_f16m2(const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat16m2_t test_vloxei8_v_f16m2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei8_v_f16m4(const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat16m4_t test_vloxei8_v_f16m4(const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat16m8_t test_vloxei8_v_f16m8(const float16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat16m8_t test_vloxei8_v_f16m8(const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei8_v_f32mf2(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat32mf2_t test_vloxei8_v_f32mf2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei8_v_f32m1(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat32m1_t test_vloxei8_v_f32m1(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei8_v_f32m2(const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat32m2_t test_vloxei8_v_f32m2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei8_v_f32m4(const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat32m4_t test_vloxei8_v_f32m4(const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei8_v_f32m8(const float32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat32m8_t test_vloxei8_v_f32m8(const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei8_v_f64m1(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat64m1_t test_vloxei8_v_f64m1(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei8_v_f64m2(const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat64m2_t test_vloxei8_v_f64m2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei8_v_f64m4(const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat64m4_t test_vloxei8_v_f64m4(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei8_v_f64m8(const float64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vfloat64m8_t test_vloxei8_v_f64m8(const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint8m1_t test_vloxei8_v_i8m1(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint8m2_t test_vloxei8_v_i8m2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint8m4_t test_vloxei8_v_i8m4(const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint8m8_t test_vloxei8_v_i8m8(const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint16m1_t test_vloxei8_v_i16m1(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint16m2_t test_vloxei8_v_i16m2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint16m4_t test_vloxei8_v_i16m4(const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint16m8_t test_vloxei8_v_i16m8(const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint32m1_t test_vloxei8_v_i32m1(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint32m2_t test_vloxei8_v_i32m2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint32m4_t test_vloxei8_v_i32m4(const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint32m8_t test_vloxei8_v_i32m8(const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint64m1_t test_vloxei8_v_i64m1(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint64m2_t test_vloxei8_v_i64m2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint64m4_t test_vloxei8_v_i64m4(const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vint64m8_t test_vloxei8_v_i64m8(const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(base, bindex, vl);
+vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat16mf2_t test_vloxei8_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat16m1_t test_vloxei8_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat16m2_t test_vloxei8_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat16m4_t test_vloxei8_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat16m8_t test_vloxei8_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8(vm, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8(mask, base, bindex, vl);
+vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
test_vloxei8_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); 
+vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vloxei8(mask, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, const uint16_t 
*base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8(mask, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei8\.[ivxfswum.]+\s+} 118 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg2ei16.c b/auto-generated/gnu-overloaded-tests/vloxseg2ei16.c index fc8a1e3fa..b2df84ac5 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg2ei16.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg2ei16.c @@ -6,388 +6,388 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const float16_t *base, 
vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2(const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2(const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2(const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) { 
- return __riscv_vloxseg2ei16(base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2(const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2(const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + 
return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2(const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2(const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t 
bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t 
bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) 
{ + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_m(vbool4_t vm, const 
int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, 
bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t mask, 
const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16(mask, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei16\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg2ei32.c b/auto-generated/gnu-overloaded-tests/vloxseg2ei32.c index e728540fd..a8ecf0d2b 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg2ei32.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg2ei32.c @@ -6,372 +6,372 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2(const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, 
vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2(const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2(const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) { - 
return __riscv_vloxseg2ei32(base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2(const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2(const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2(const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2(const uint8_t *rs1, vuint32m1_t rs2, 
size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint64m1x2_t 
test_vloxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } 
-vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, 
vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint8mf8x2_t 
test_vloxseg2ei32_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32(mask, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t 
bindex, size_t vl) {
- return __riscv_vloxseg2ei32(mask, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxseg2ei32(mask, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxseg2ei32(mask, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxseg2ei32(mask, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxseg2ei32(mask, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxseg2ei32(mask, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei32\.[ivxfswum.]+\s+} 92 } } */

diff --git a/auto-generated/gnu-overloaded-tests/vloxseg2ei64.c b/auto-generated/gnu-overloaded-tests/vloxseg2ei64.c
index 98d5cebcb..fc944310c 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg2ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg2ei64.c
@@ -6,332 +6,332 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const float16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const float16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const float16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const float16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+
return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2(const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2(const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const 
int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2(const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2(const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint8mf2x2_t 
test_vloxseg2ei64_v_u8mf2x2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei64(rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei64(mask, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t mask, const 
int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64(mask, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64(vm, rs1, rs2, 
vl);
 }

-vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxseg2ei64(mask, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei64\.[ivxfswum.]+\s+} 82 } } */

diff --git a/auto-generated/gnu-overloaded-tests/vloxseg2ei8.c b/auto-generated/gnu-overloaded-tests/vloxseg2ei8.c
index 44ca3fb61..30667ec9e 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg2ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg2ei8.c
@@ -6,388 +6,388 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t
test_vloxseg2ei8_v_f16mf4x2(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2(const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2(const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8(base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2(const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2(const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint32m2x2_t 
test_vloxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2(const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2(const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2(const uint16_t 
*rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint8mf8x2_t 
test_vloxseg2ei8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint32mf2x2_t 
test_vloxseg2ei8_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); 
+vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8(mask, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8(mask, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg2ei8(mask, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxseg2ei8(mask, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei8\.[ivxfswum.]+\s+} 96 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg3ei16.c b/auto-generated/gnu-overloaded-tests/vloxseg3ei16.c
index b768fea51..fc79890c6 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg3ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg3ei16.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const float16_t *base, vuint16m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const float16_t *base, vuint16m2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float32_t *base, vuint16m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3(const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
- return
__riscv_vloxseg3ei16(base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3(const int32_t *rs1, vuint16mf2_t rs2, size_t 
vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const 
uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - 
return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint16m2x3_t 
test_vloxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16(mask, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { 
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei16(mask, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei16(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei16\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg3ei32.c b/auto-generated/gnu-overloaded-tests/vloxseg3ei32.c
index d232ae4c7..136365de8 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg3ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg3ei32.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float
float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint8m1x3_t 
test_vloxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); 
+vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + 
return __riscv_vloxseg3ei32(rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei32(mask, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t 
*base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32(mask, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32(vm, rs1, rs2, 
vl);
 }
-vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei32(mask, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei32(mask, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei32(mask, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei32(mask, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei32(mask, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei32\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg3ei64.c b/auto-generated/gnu-overloaded-tests/vloxseg3ei64.c
index 942a69707..c59ee98df 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg3ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg3ei64.c
@@ -6,284 +6,284 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const float16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei64(base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const float16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei64(base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const float16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei64(base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const float16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxseg3ei64(base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3(const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3(const float32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei64(base, bindex, vl);
+vfloat32mf2x3_t
test_vloxseg3ei64_v_f32mf2x3(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(base, bindex, vl); +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(rs1, 
 }

-vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei64_v_i32m1x3(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei64_v_i32m2x3(const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei64_v_i64m1x3(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei64_v_i64m2x3(const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
bindex, vl); +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64(mask, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, 
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64(mask, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei64\.[ivxfswum.]+\s+} 70 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg3ei8.c b/auto-generated/gnu-overloaded-tests/vloxseg3ei8.c
index 88c48e495..519b689fa 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg3ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg3ei8.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3(const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei8_v_i8m1x3(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei8_v_i8m2x3(const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei8_v_i16m1x3(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei8_v_i16m2x3(const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei8_v_i32m1x3(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei8_v_i32m2x3(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei8_v_i64m1x3(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei8_v_i64m2x3(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8(mask, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei8\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg4ei16.c b/auto-generated/gnu-overloaded-tests/vloxseg4ei16.c
index edff402d5..67a8193da 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg4ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg4ei16.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4(const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei16_v_i8m1x4(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei16_v_i8m2x4(const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei16_v_i16m1x4(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei16_v_i16m2x4(const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei16_v_i32m1x4(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei16_v_i32m2x4(const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei16_v_i64m1x4(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei16_v_i64m2x4(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16(vm, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16(mask, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16(mask, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg4ei32.c b/auto-generated/gnu-overloaded-tests/vloxseg4ei32.c index 42b64cd58..5e01d11ee 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg4ei32.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg4ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float 
float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint8m1x4_t 
test_vloxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); 
+vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + 
return __riscv_vloxseg4ei32(rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei32(mask, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t 
*base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, 
vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32(mask, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg4ei64.c b/auto-generated/gnu-overloaded-tests/vloxseg4ei64.c index e66359888..7ee62505a 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg4ei64.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg4ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat32mf2x4_t 
test_vloxseg4ei64_v_f32mf2x4(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, 
rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei64(base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_m(vbool32_t vm, const 
float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, 
bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, 
vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64(mask, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg4ei8.c b/auto-generated/gnu-overloaded-tests/vloxseg4ei8.c index 82d357687..0da5634f5 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg4ei8.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg4ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat16mf2x4_t 
test_vloxseg4ei8_v_f16mf2x4(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint16mf4x4_t 
test_vloxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4(const 
uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + 
return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_m(vbool8_t vm, 
const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint8mf8x4_t 
test_vloxseg4ei8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8(mask, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg5ei16.c b/auto-generated/gnu-overloaded-tests/vloxseg5ei16.c index dfcae1488..5b927edcf 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg5ei16.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg5ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei16(base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5(const uint8_t *rs1, vuint16m2_t rs2, 
size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vfloat64m1x5_t 
test_vloxseg5ei16_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, 
size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16(mask, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei16\.[ivxfswum.]+\s+} 52 } } */ 
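For reviewers skimming the renamed signatures above, a minimal usage sketch may help: it shows how the rs1 (base pointer), rs2 (index vector), and vl operands of the overloaded __riscv_vloxseg5ei16 call line up in ordinary code. This is not generated output and is not part of the patch; the function, loop, and buffer names are invented for illustration.

/* Sketch only: copy field 0 of 5-field int32 records selected by
   16-bit byte offsets.  Uses the same rs1/rs2/vl operand order as
   the generated tests above. */
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void copy_field0(const int32_t *rs1, const uint16_t *offsets,
                 int32_t *field0, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    /* EEW=16 indices against EEW=32, LMUL=1 data give a u16mf2 index
       vector, the same pairing test_vloxseg5ei16_v_i32m1x5 exercises.
       Each offset is a byte offset into rs1, per vloxei semantics. */
    vuint16mf2_t rs2 = __riscv_vle16_v_u16mf2(offsets + i, vl);
    /* Overloaded form, as in the tests: mask-free variant takes
       (rs1, rs2, vl); masked variants prepend vm. */
    vint32m1x5_t rec = __riscv_vloxseg5ei16(rs1, rs2, vl);
    /* Extract the first of the five segment fields and store it. */
    vint32m1_t f0 = __riscv_vget_v_i32m1x5_i32m1(rec, 0);
    __riscv_vse32_v_i32m1(field0 + i, f0, vl);
    i += vl;
  }
}

The index EEW/LMUL ratio (u16mf2 for i32m1 data) is what distinguishes the vuint16mf4_t/vuint16mf2_t/vuint16m1_t index types across the data widths in the hunks above; the overload resolves on exactly those operand types.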
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg5ei32.c b/auto-generated/gnu-overloaded-tests/vloxseg5ei32.c index cdf9d0649..f80e5cbc7 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg5ei32.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg5ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } 
-vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg5ei32(base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t 
bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint8m1x5_t 
test_vloxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32(mask, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg5ei64.c b/auto-generated/gnu-overloaded-tests/vloxseg5ei64.c index 0663b64e0..d5462fff9 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg5ei64.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg5ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei64(base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint8m1x5_t test_vloxseg5ei64_v_i8m1x5(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint16m1x5_t test_vloxseg5ei64_v_i16m1x5(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint32m1x5_t test_vloxseg5ei64_v_i32m1x5(const int32_t *rs1, vuint64m2_t 
rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vint64m1x5_t test_vloxseg5ei64_v_i64m1x5(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64(mask, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64(vm, 
rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64(mask, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg5ei8.c b/auto-generated/gnu-overloaded-tests/vloxseg5ei8.c
index 998ac8743..5f00ba5cd 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg5ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg5ei8.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5(const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8(mask, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
  return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg6ei16.c b/auto-generated/gnu-overloaded-tests/vloxseg6ei16.c
index 675aa264c..23b3f87e5 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg6ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg6ei16.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei16_v_i8m1x6(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei16_v_i16m1x6(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei16_v_i32m1x6(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei16_v_i64m1x6(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16(mask, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg6ei32.c b/auto-generated/gnu-overloaded-tests/vloxseg6ei32.c
index 1c62bccf2..55420d0af 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg6ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg6ei32.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei32_v_i8m1x6(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei32_v_i16m1x6(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei32_v_i32m1x6(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei32_v_i64m1x6(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32(mask, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vloxseg6ei64.c b/auto-generated/gnu-overloaded-tests/vloxseg6ei64.c
index 8192f68d8..4357edef9 100644
--- a/auto-generated/gnu-overloaded-tests/vloxseg6ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vloxseg6ei64.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei64_v_i8m1x6(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6(const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6(const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei64_v_i16m1x6(const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6(const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei64_v_i32m1x6(const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei64_v_i64m1x6(const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64(mask, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl)
{ - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } -vuint64m1x6_t 
test_vloxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64(mask, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg6ei8.c b/auto-generated/gnu-overloaded-tests/vloxseg6ei8.c index 0154efd41..4177401ce 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg6ei8.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg6ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); 
+vint8m1x6_t test_vloxseg6ei8_v_i8m1x6(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint16m1x6_t 
test_vloxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, 
size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, 
vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8(mask, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg7ei16.c b/auto-generated/gnu-overloaded-tests/vloxseg7ei16.c index 9f0bf6a2c..fbd6973b5 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg7ei16.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg7ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vfloat16mf2x7_t 
test_vloxseg7ei16_v_f16mf2x7(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vloxseg7ei16(rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); } -vfloat16mf4x7_t 
test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_m(vbool64_t vm, 
const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16(mask, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg7ei32.c b/auto-generated/gnu-overloaded-tests/vloxseg7ei32.c index e09e18203..1b9eca89b 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg7ei32.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg7ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } 
-vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei32(base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vfloat32mf2x7_t 
test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, 
vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, 
base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32(mask, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg7ei64.c b/auto-generated/gnu-overloaded-tests/vloxseg7ei64.c index 9771a9e75..39cba735d 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg7ei64.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg7ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint8mf2x7_t 
test_vloxseg7ei64_v_i8mf2x7(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } 
-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t 
*base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint8mf4x7_t 
test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64(mask, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg7ei8.c b/auto-generated/gnu-overloaded-tests/vloxseg7ei8.c index f8ca3df7b..d494723e2 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg7ei8.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg7ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const float16_t 
*base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint16m1x7_t test_vloxseg7ei8_v_i16m1x7(const int16_t 
*rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *base, 
vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint16mf4x7_t 
test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, 
rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8(mask, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg8ei16.c b/auto-generated/gnu-overloaded-tests/vloxseg8ei16.c index ec1ed63be..81f807ac2 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg8ei16.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg8ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vfloat32m1x8_t 
test_vloxseg8ei16_v_f32m1x8(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, 
rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_m(vbool16_t vm, const float16_t 
*rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
__riscv_vloxseg8ei16(mask, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint32m1x8_t 
test_vloxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16(mask, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg8ei32.c b/auto-generated/gnu-overloaded-tests/vloxseg8ei32.c index 523a0b0a3..fb0ed24ad 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg8ei32.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg8ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint8mf2x8_t 
test_vloxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, 
vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, 
vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint8mf8x8_t 
test_vloxseg8ei32_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32(mask, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg8ei64.c b/auto-generated/gnu-overloaded-tests/vloxseg8ei64.c index f2bc59b9a..8762e7e33 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg8ei64.c +++ 
b/auto-generated/gnu-overloaded-tests/vloxseg8ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8(const int16_t *rs1, vuint64m2_t 
rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vint64m1x8_t test_vloxseg8ei64_v_i64m1x8(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint32m1x8_t 
test_vloxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, 
rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, 
vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64(mask, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vloxseg8ei8.c b/auto-generated/gnu-overloaded-tests/vloxseg8ei8.c index 0653cff23..efd03f93d 100644 --- a/auto-generated/gnu-overloaded-tests/vloxseg8ei8.c +++ b/auto-generated/gnu-overloaded-tests/vloxseg8ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float32_t *base, vuint8mf8_t 
bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } 
-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_m(vbool64_t vm, const uint32_t 
*rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8(mask, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlse16.c b/auto-generated/gnu-overloaded-tests/vlse16.c index 0ebaf949e..b06884210 100644 --- a/auto-generated/gnu-overloaded-tests/vlse16.c +++ b/auto-generated/gnu-overloaded-tests/vlse16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t vm, 
const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16(mask, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlse32.c b/auto-generated/gnu-overloaded-tests/vlse32.c index a75d89bf0..a18b7734b 100644 --- a/auto-generated/gnu-overloaded-tests/vlse32.c +++ b/auto-generated/gnu-overloaded-tests/vlse32.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; 
-vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlse32(vm, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32(mask, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse32\.[ivxfswum.]+\s+} 15 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlse64.c b/auto-generated/gnu-overloaded-tests/vlse64.c index 97d92b064..82384e69e 100644 --- a/auto-generated/gnu-overloaded-tests/vlse64.c +++ b/auto-generated/gnu-overloaded-tests/vlse64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, 
const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64(mask, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlse8.c b/auto-generated/gnu-overloaded-tests/vlse8.c index 8e17be076..2400c7e4e 100644 --- a/auto-generated/gnu-overloaded-tests/vlse8.c +++ b/auto-generated/gnu-overloaded-tests/vlse8.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_m(vbool2_t vm, const int8_t 
*rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8(mask, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse8\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e16.c b/auto-generated/gnu-overloaded-tests/vlseg2e16.c index 73544d978..996052fb3 100644 --- a/auto-generated/gnu-overloaded-tests/vlseg2e16.c +++ b/auto-generated/gnu-overloaded-tests/vlseg2e16.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16(mask, base, vl); +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16(mask, base, vl); +vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, size_t vl) { - return 
__riscv_vlseg2e16(mask, base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_m(vbool4_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vint16m1x2_t test_vlseg2e16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vint16m1x2_t test_vlseg2e16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vint16m2x2_t test_vlseg2e16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vint16m2x2_t test_vlseg2e16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vint16m4x2_t test_vlseg2e16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vint16m4x2_t test_vlseg2e16_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vuint16m1x2_t test_vlseg2e16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vuint16m1x2_t test_vlseg2e16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vuint16m2x2_t test_vlseg2e16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vuint16m2x2_t test_vlseg2e16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

-vuint16m4x2_t test_vlseg2e16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16(mask, base, vl);
+vuint16m4x2_t test_vlseg2e16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16\.[ivxfswum.]+\s+} 15 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e16ff.c b/auto-generated/gnu-overloaded-tests/vlseg2e16ff.c
index 49d0de2f0..07c691f1c 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg2e16ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg2e16ff.c
@@ -6,64 +6,64 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

-vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff(mask, base, new_vl, vl);
+vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16ff\.[ivxfswum.]+\s+} 15 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e32.c b/auto-generated/gnu-overloaded-tests/vlseg2e32.c
index f222f1c84..529cb6a75 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg2e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg2e32.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vint32m1x2_t test_vlseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vint32m1x2_t test_vlseg2e32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vint32m2x2_t test_vlseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vint32m2x2_t test_vlseg2e32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vint32m4x2_t test_vlseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vint32m4x2_t test_vlseg2e32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vuint32m1x2_t test_vlseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vuint32m1x2_t test_vlseg2e32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vuint32m2x2_t test_vlseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vuint32m2x2_t test_vlseg2e32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

-vuint32m4x2_t test_vlseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32(mask, base, vl);
+vuint32m4x2_t test_vlseg2e32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e32ff.c b/auto-generated/gnu-overloaded-tests/vlseg2e32ff.c
index 680b6d1dc..7e4575576 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg2e32ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg2e32ff.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

-vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e32ff(mask, base, new_vl, vl);
+vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e32ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32ff\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e64.c b/auto-generated/gnu-overloaded-tests/vlseg2e64.c
index 07356ef07..e89ce7444 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg2e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg2e64.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vint64m1x2_t test_vlseg2e64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vint64m1x2_t test_vlseg2e64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vint64m2x2_t test_vlseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vint64m2x2_t test_vlseg2e64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vint64m4x2_t test_vlseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vint64m4x2_t test_vlseg2e64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vuint64m1x2_t test_vlseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vuint64m1x2_t test_vlseg2e64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vuint64m2x2_t test_vlseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vuint64m2x2_t test_vlseg2e64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

-vuint64m4x2_t test_vlseg2e64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg2e64(mask, base, vl);
+vuint64m4x2_t test_vlseg2e64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg2e64(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e64ff.c b/auto-generated/gnu-overloaded-tests/vlseg2e64ff.c
index 3618cf273..d903bfaa7 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg2e64ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg2e64ff.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

-vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e64ff(mask, base, new_vl, vl);
+vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e64ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64ff\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e8.c b/auto-generated/gnu-overloaded-tests/vlseg2e8.c
index 2dc5b61e4..f6b001950 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg2e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg2e8.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vint8m1x2_t test_vlseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vint8m1x2_t test_vlseg2e8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vint8m2x2_t test_vlseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vint8m2x2_t test_vlseg2e8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vint8m4x2_t test_vlseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vint8m4x2_t test_vlseg2e8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vuint8m1x2_t test_vlseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vuint8m1x2_t test_vlseg2e8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vuint8m2x2_t test_vlseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vuint8m2x2_t test_vlseg2e8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

-vuint8m4x2_t test_vlseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg2e8(mask, base, vl);
+vuint8m4x2_t test_vlseg2e8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg2e8(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg2e8ff.c b/auto-generated/gnu-overloaded-tests/vlseg2e8ff.c
index c5cfd4dc4..acfeb4228 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg2e8ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg2e8ff.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

-vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e8ff(mask, base, new_vl, vl);
+vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e8ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8ff\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e16.c b/auto-generated/gnu-overloaded-tests/vlseg3e16.c
index 457680ff9..5be433631 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e16.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vint16m1x3_t test_vlseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vint16m1x3_t test_vlseg3e16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vint16m2x3_t test_vlseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vint16m2x3_t test_vlseg3e16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vuint16m1x3_t test_vlseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vuint16m1x3_t test_vlseg3e16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

-vuint16m2x3_t test_vlseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg3e16(mask, base, vl);
+vuint16m2x3_t test_vlseg3e16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg3e16(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e16ff.c b/auto-generated/gnu-overloaded-tests/vlseg3e16ff.c
index 78c80c6f3..4d890d876 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e16ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e16ff.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

-vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff(mask, base, new_vl, vl);
+vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16ff\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e32.c b/auto-generated/gnu-overloaded-tests/vlseg3e32.c
index 9ded9dfe0..60a1d1769 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e32.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vint32m1x3_t test_vlseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vint32m1x3_t test_vlseg3e32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vint32m2x3_t test_vlseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vint32m2x3_t test_vlseg3e32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vuint32m1x3_t test_vlseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vuint32m1x3_t test_vlseg3e32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

-vuint32m2x3_t test_vlseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32(mask, base, vl);
+vuint32m2x3_t test_vlseg3e32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e32ff.c b/auto-generated/gnu-overloaded-tests/vlseg3e32ff.c
index ceed88654..b5d863eb7 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e32ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e32ff.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff(mask, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32ff\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e64.c b/auto-generated/gnu-overloaded-tests/vlseg3e64.c
index d43339a43..cb58ee8bb 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e64.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64(mask, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64(vm, rs1, vl);
 }

-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64(mask, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64(vm, rs1, vl);
 }

-vint64m1x3_t test_vlseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64(mask, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64(vm, rs1, vl);
 }

-vint64m2x3_t test_vlseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64(mask, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64(vm, rs1, vl);
 }

-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64(mask, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64(vm, rs1, vl);
 }

-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64(mask, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e64ff.c b/auto-generated/gnu-overloaded-tests/vlseg3e64ff.c
index 6311f8f1c..d299212a2 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e64ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e64ff.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff(mask, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff(vm, rs1, new_vl, vl);
 }

-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff(mask, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff(vm, rs1, new_vl, vl);
 }

-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff(mask, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff(vm, rs1, new_vl, vl);
 }

-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff(mask, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff(vm, rs1, new_vl, vl);
 }

-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff(mask, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff(vm, rs1, new_vl, vl);
 }

-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff(mask, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64ff\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e8.c b/auto-generated/gnu-overloaded-tests/vlseg3e8.c
index fe909ed07..f5146bfe5 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e8.c
@@ -6,44 +6,44 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vint8m1x3_t test_vlseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vint8m2x3_t test_vlseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8(mask, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8\.[ivxfswum.]+\s+} 10 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg3e8ff.c b/auto-generated/gnu-overloaded-tests/vlseg3e8ff.c
index cb20d29e4..11aa78b0e 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg3e8ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg3e8ff.c
@@ -6,44 +6,44 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

-vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff(mask, base, new_vl, vl);
+vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff(vm, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8ff\.[ivxfswum.]+\s+} 10 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e16.c b/auto-generated/gnu-overloaded-tests/vlseg4e16.c
index d52153179..0cd0a68c7 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e16.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vint16m1x4_t test_vlseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vint16m1x4_t test_vlseg4e16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vint16m2x4_t test_vlseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vint16m2x4_t test_vlseg4e16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vuint16m1x4_t test_vlseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vuint16m1x4_t test_vlseg4e16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

-vuint16m2x4_t test_vlseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg4e16(mask, base, vl);
+vuint16m2x4_t test_vlseg4e16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg4e16(vm, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e16ff.c b/auto-generated/gnu-overloaded-tests/vlseg4e16ff.c
index 2fead616d..1368435f3 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e16ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e16ff.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }

-vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }
 
-vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff(mask, base, new_vl, vl);
+vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16ff\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e32.c b/auto-generated/gnu-overloaded-tests/vlseg4e32.c
index 1ad390b3b..3dbac14cd 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e32.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vint32m1x4_t test_vlseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vint32m1x4_t test_vlseg4e32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vint32m2x4_t test_vlseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vint32m2x4_t test_vlseg4e32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vuint32m1x4_t test_vlseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vuint32m1x4_t test_vlseg4e32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
-vuint32m2x4_t test_vlseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32(mask, base, vl);
+vuint32m2x4_t test_vlseg4e32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e32ff.c b/auto-generated/gnu-overloaded-tests/vlseg4e32ff.c
index 9ab2fd4a2..bfa2dc127 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e32ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e32ff.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_m(vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
-vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff(mask, base, new_vl, vl);
+vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32ff\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e64.c b/auto-generated/gnu-overloaded-tests/vlseg4e64.c
index a0fccc05f..b1fa19233 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e64.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg4e64(mask, base, vl);
+vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg4e64(vm, rs1, vl);
 }
 
-vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg4e64(mask, base, vl);
+vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg4e64(vm, rs1, vl);
 }
 
-vint64m1x4_t test_vlseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg4e64(mask, base, vl);
+vint64m1x4_t test_vlseg4e64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg4e64(vm, rs1, vl);
 }
 
-vint64m2x4_t test_vlseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg4e64(mask, base, vl);
+vint64m2x4_t test_vlseg4e64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg4e64(vm, rs1, vl);
 }
 
-vuint64m1x4_t test_vlseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg4e64(mask, base, vl);
+vuint64m1x4_t test_vlseg4e64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg4e64(vm, rs1, vl);
 }
 
-vuint64m2x4_t test_vlseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg4e64(mask, base, vl);
+vuint64m2x4_t test_vlseg4e64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg4e64(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e64ff.c b/auto-generated/gnu-overloaded-tests/vlseg4e64ff.c
index 7bdd3a6b2..8c5cd00ef 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e64ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e64ff.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e64ff(mask, base, new_vl, vl);
+vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e64ff(vm, rs1, new_vl, vl);
 }
 
-vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e64ff(mask, base, new_vl, vl);
+vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e64ff(vm, rs1, new_vl, vl);
 }
 
-vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e64ff(mask, base, new_vl, vl);
+vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e64ff(vm, rs1, new_vl, vl);
 }
 
-vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e64ff(mask, base, new_vl, vl);
+vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e64ff(vm, rs1, new_vl, vl);
 }
 
-vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e64ff(mask, base, new_vl, vl);
+vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e64ff(vm, rs1, new_vl, vl);
 }
 
-vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e64ff(mask, base, new_vl, vl);
+vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e64ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64ff\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e8.c b/auto-generated/gnu-overloaded-tests/vlseg4e8.c
index 479affd82..65dd05e1d 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e8.c
@@ -6,44 +6,44 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vint8m1x4_t test_vlseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vint8m1x4_t test_vlseg4e8_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vint8m2x4_t test_vlseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vint8m2x4_t test_vlseg4e8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vuint8m1x4_t test_vlseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vuint8m1x4_t test_vlseg4e8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
-vuint8m2x4_t test_vlseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8(mask, base, vl);
+vuint8m2x4_t test_vlseg4e8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8\.[ivxfswum.]+\s+} 10 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg4e8ff.c b/auto-generated/gnu-overloaded-tests/vlseg4e8ff.c
index c2f359c93..6c0c024f5 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg4e8ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg4e8ff.c
@@ -6,44 +6,44 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff(mask, base, new_vl, vl);
+vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8ff\.[ivxfswum.]+\s+} 10 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e16.c b/auto-generated/gnu-overloaded-tests/vlseg5e16.c
index a7181ba21..d4fd97379 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e16.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vint16m1x5_t test_vlseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vint16m1x5_t test_vlseg5e16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
-vuint16m1x5_t test_vlseg5e16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16(mask, base, vl);
+vuint16m1x5_t test_vlseg5e16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e16ff.c b/auto-generated/gnu-overloaded-tests/vlseg5e16ff.c
index 20c3c8537..74a00e1ba 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e16ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e16ff.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff(mask, base, new_vl, vl);
+vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16ff\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e32.c b/auto-generated/gnu-overloaded-tests/vlseg5e32.c
index 403920848..d36ca7c35 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e32.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg5e32(mask, base, vl);
+vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32(vm, rs1, vl);
 }
 
-vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg5e32(mask, base, vl);
+vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32(vm, rs1, vl);
 }
 
-vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32(mask, base, vl);
+vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32(vm, rs1, vl);
 }
 
-vint32m1x5_t test_vlseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32(mask, base, vl);
+vint32m1x5_t test_vlseg5e32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32(vm, rs1, vl);
 }
 
-vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg5e32(mask, base, vl);
+vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32(vm, rs1, vl);
 }
 
-vuint32m1x5_t test_vlseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg5e32(mask, base, vl);
+vuint32m1x5_t test_vlseg5e32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e32ff.c b/auto-generated/gnu-overloaded-tests/vlseg5e32ff.c
index 479d43032..9de8d84c5 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e32ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e32ff.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff(mask, base, new_vl, vl);
+vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff(vm, rs1, new_vl, vl);
 }
 
-vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff(mask, base, new_vl, vl);
+vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff(vm, rs1, new_vl, vl);
 }
 
-vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff(mask, base, new_vl, vl);
+vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff(vm, rs1, new_vl, vl);
 }
 
-vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff(mask, base, new_vl, vl);
+vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff(vm, rs1, new_vl, vl);
 }
 
-vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff(mask, base, new_vl, vl);
+vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff(vm, rs1, new_vl, vl);
 }
 
-vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff(mask, base, new_vl, vl);
+vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32ff\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e64.c b/auto-generated/gnu-overloaded-tests/vlseg5e64.c
index e1468b522..921504e65 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e64.c
@@ -6,16 +6,16 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg5e64(mask, base, vl);
+vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg5e64(vm, rs1, vl);
 }
 
-vint64m1x5_t test_vlseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg5e64(mask, base, vl);
+vint64m1x5_t test_vlseg5e64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg5e64(vm, rs1, vl);
 }
 
-vuint64m1x5_t test_vlseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg5e64(mask, base, vl);
+vuint64m1x5_t test_vlseg5e64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg5e64(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e64ff.c b/auto-generated/gnu-overloaded-tests/vlseg5e64ff.c
index 77ba89554..e4d0bfe3a 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e64ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e64ff.c
@@ -6,16 +6,16 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e64ff(mask, base, new_vl, vl);
+vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e64ff(vm, rs1, new_vl, vl);
 }
 
-vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e64ff(mask, base, new_vl, vl);
+vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e64ff(vm, rs1, new_vl, vl);
 }
 
-vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e64ff(mask, base, new_vl, vl);
+vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e64ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64ff\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e8.c b/auto-generated/gnu-overloaded-tests/vlseg5e8.c
index b43a1000a..f16f6599f 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e8.c
@@ -6,36 +6,36 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
-vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
-vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
-vint8m1x5_t test_vlseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vint8m1x5_t test_vlseg5e8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
-vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
-vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
-vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
-vuint8m1x5_t test_vlseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg5e8(mask, base, vl);
+vuint8m1x5_t test_vlseg5e8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg5e8(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg5e8ff.c b/auto-generated/gnu-overloaded-tests/vlseg5e8ff.c
index b1094808b..91cdb8e95 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg5e8ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg5e8ff.c
@@ -6,36 +6,36 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e8ff(mask, base, new_vl, vl);
+vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e8ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8ff\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e16.c b/auto-generated/gnu-overloaded-tests/vlseg6e16.c
index 221a67c0a..70bec278b 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e16.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vint16m1x6_t test_vlseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vint16m1x6_t test_vlseg6e16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
-vuint16m1x6_t test_vlseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg6e16(mask, base, vl);
+vuint16m1x6_t test_vlseg6e16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg6e16(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e16ff.c b/auto-generated/gnu-overloaded-tests/vlseg6e16ff.c
index 4a9101938..c9a3ff157 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e16ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e16ff.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
-vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff(mask, base, new_vl, vl);
+vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16ff\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e32.c b/auto-generated/gnu-overloaded-tests/vlseg6e32.c
index 44197f2ef..5af0643e1 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e32.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32(mask, base, vl);
+vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32(vm, rs1, vl);
 }
 
-vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32(mask, base, vl);
+vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32(vm, rs1, vl);
 }
 
-vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32(mask, base, vl);
+vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32(vm, rs1, vl);
 }
 
-vint32m1x6_t test_vlseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32(mask, base, vl);
+vint32m1x6_t test_vlseg6e32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32(vm, rs1, vl);
 }
 
-vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32(mask, base, vl);
+vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32(vm, rs1, vl);
 }
 
-vuint32m1x6_t test_vlseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32(mask, base, vl);
+vuint32m1x6_t test_vlseg6e32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e32ff.c b/auto-generated/gnu-overloaded-tests/vlseg6e32ff.c
index 107df7d13..3dcecd12e 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e32ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e32ff.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff(mask, base, new_vl, vl);
+vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff(vm, rs1, new_vl, vl);
 }
 
-vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff(mask, base, new_vl, vl);
+vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff(vm, rs1, new_vl, vl);
 }
 
-vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff(mask, base, new_vl, vl);
+vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff(vm, rs1, new_vl, vl);
 }
 
-vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff(mask, base, new_vl, vl);
+vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff(vm, rs1, new_vl, vl);
 }
 
-vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff(mask, base, new_vl, vl);
+vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff(vm, rs1, new_vl, vl);
 }
 
-vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff(mask, base, new_vl, vl);
+vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32ff\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e64.c b/auto-generated/gnu-overloaded-tests/vlseg6e64.c
index eca22de37..1b9b23383 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e64.c
@@ -6,16 +6,16 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg6e64(mask, base, vl);
+vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64(vm, rs1, vl);
 }
 
-vint64m1x6_t test_vlseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg6e64(mask, base, vl);
+vint64m1x6_t test_vlseg6e64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64(vm, rs1, vl);
 }
 
-vuint64m1x6_t test_vlseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg6e64(mask, base, vl);
+vuint64m1x6_t test_vlseg6e64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e64ff.c b/auto-generated/gnu-overloaded-tests/vlseg6e64ff.c
index 3bbdcdfaa..593f10c4b 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e64ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e64ff.c
@@ -6,16 +6,16 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff(mask, base, new_vl, vl);
+vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff(vm, rs1, new_vl, vl);
 }
 
-vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff(mask, base, new_vl, vl);
+vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff(vm, rs1, new_vl, vl);
 }
 
-vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff(mask, base, new_vl, vl);
+vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64ff\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e8.c b/auto-generated/gnu-overloaded-tests/vlseg6e8.c
index 9857441c4..019e31c4a 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e8.c
@@ -6,36 +6,36 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
-vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
-vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
-vint8m1x6_t test_vlseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vint8m1x6_t test_vlseg6e8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
-vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
-vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
-vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
-vuint8m1x6_t test_vlseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8(mask, base, vl);
+vuint8m1x6_t test_vlseg6e8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8(vm, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg6e8ff.c b/auto-generated/gnu-overloaded-tests/vlseg6e8ff.c
index 309f27aec..254ac951d 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg6e8ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg6e8ff.c
@@ -6,36 +6,36 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
-vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
-vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff(mask, base, new_vl, vl);
+vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff(vm, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8ff\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e16.c b/auto-generated/gnu-overloaded-tests/vlseg7e16.c
index 5d84b6bb2..075e4178b 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg7e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg7e16.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16(mask, base, vl);
+vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16(vm, rs1, vl);
 }
 
-vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16(mask, base, vl);
+vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16(vm, rs1, vl);
 }
 
-vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16(mask, base, vl);
+vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16(vm, rs1, vl);
 }
 
-vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg7e16(mask, base, vl);
+vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16(vm, rs1, vl);
 }
 
-vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg7e16(mask, base, vl);
+vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16(mask, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16(mask, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16(mask, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16(mask, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16\.[ivxfswum.]+\s+} 9 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e16ff.c b/auto-generated/gnu-overloaded-tests/vlseg7e16ff.c index 8a6c53ae2..cd52b5d14 100644 --- a/auto-generated/gnu-overloaded-tests/vlseg7e16ff.c +++ b/auto-generated/gnu-overloaded-tests/vlseg7e16ff.c @@ -6,40 +6,40 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); 
+vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff(mask, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16ff\.[ivxfswum.]+\s+} 9 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e32.c b/auto-generated/gnu-overloaded-tests/vlseg7e32.c index e8c712f93..80dc87f71 100644 --- a/auto-generated/gnu-overloaded-tests/vlseg7e32.c +++ b/auto-generated/gnu-overloaded-tests/vlseg7e32.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32(mask, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32(vm, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32(mask, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32(vm, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32(mask, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32(vm, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32(mask, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32(vm, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32(mask, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32(vm, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32(mask, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32(vm, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e32ff.c b/auto-generated/gnu-overloaded-tests/vlseg7e32ff.c 
index 48e232105..3fe4455a4 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg7e32ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg7e32ff.c
@@ -6,28 +6,28 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e32ff(mask, base, new_vl, vl);
+vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e32ff(vm, rs1, new_vl, vl);
 }

-vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e32ff(mask, base, new_vl, vl);
+vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e32ff(vm, rs1, new_vl, vl);
 }

-vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e32ff(mask, base, new_vl, vl);
+vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e32ff(vm, rs1, new_vl, vl);
 }

-vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e32ff(mask, base, new_vl, vl);
+vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e32ff(vm, rs1, new_vl, vl);
 }

-vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e32ff(mask, base, new_vl, vl);
+vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e32ff(vm, rs1, new_vl, vl);
 }

-vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e32ff(mask, base, new_vl, vl);
+vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e32ff(vm, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32ff\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e64.c b/auto-generated/gnu-overloaded-tests/vlseg7e64.c
index f5e92ecb9..cb5987c75 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg7e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg7e64.c
@@ -6,16 +6,16 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg7e64(mask, base, vl);
+vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg7e64(vm, rs1, vl);
 }

-vint64m1x7_t test_vlseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg7e64(mask, base, vl);
+vint64m1x7_t test_vlseg7e64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg7e64(vm, rs1, vl);
 }

-vuint64m1x7_t test_vlseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg7e64(mask, base, vl);
+vuint64m1x7_t test_vlseg7e64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg7e64(vm, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e64ff.c b/auto-generated/gnu-overloaded-tests/vlseg7e64ff.c
index ac6c41bb2..f4bae6cbd 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg7e64ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg7e64ff.c
@@ -6,16 +6,16 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e64ff(mask, base, new_vl, vl);
+vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e64ff(vm, rs1, new_vl, vl);
 }

-vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e64ff(mask, base, new_vl, vl);
+vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e64ff(vm, rs1, new_vl, vl);
 }

-vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e64ff(mask, base, new_vl, vl);
+vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e64ff(vm, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64ff\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e8.c b/auto-generated/gnu-overloaded-tests/vlseg7e8.c
index 797140a4b..d3de564fc 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg7e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg7e8.c
@@ -6,36 +6,36 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }

-vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }

-vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }

-vint8m1x7_t test_vlseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vint8m1x7_t test_vlseg7e8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }

-vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }

-vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }

-vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }

-vuint8m1x7_t test_vlseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8(mask, base, vl);
+vuint8m1x7_t test_vlseg7e8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8(vm, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg7e8ff.c b/auto-generated/gnu-overloaded-tests/vlseg7e8ff.c
index d9a339e6d..6e5198e76 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg7e8ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg7e8ff.c
@@ -6,36 +6,36 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }

-vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }

-vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff(mask, base, new_vl, vl);
+vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff(vm, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8ff\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e16.c b/auto-generated/gnu-overloaded-tests/vlseg8e16.c
index 95c35182b..a2b2d87c2 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e16.c
@@ -6,40 +6,40 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vint16m1x8_t test_vlseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vint16m1x8_t test_vlseg8e16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }

-vuint16m1x8_t test_vlseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16(mask, base, vl);
+vuint16m1x8_t test_vlseg8e16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16(vm, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e16ff.c b/auto-generated/gnu-overloaded-tests/vlseg8e16ff.c
index be273f462..0cb7e9213 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e16ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e16ff.c
@@ -6,40 +6,40 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }

-vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff(mask, base, new_vl, vl);
+vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16ff\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e32.c b/auto-generated/gnu-overloaded-tests/vlseg8e32.c
index 23cb31706..b4d2272cd 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e32.c
@@ -6,28 +6,28 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32(mask, base, vl);
+vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32(vm, rs1, vl);
 }

-vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32(mask, base, vl);
+vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32(vm, rs1, vl);
 }

-vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32(mask, base, vl);
+vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32(vm, rs1, vl);
 }

-vint32m1x8_t test_vlseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32(mask, base, vl);
+vint32m1x8_t test_vlseg8e32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32(vm, rs1, vl);
 }

-vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32(mask, base, vl);
+vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32(vm, rs1, vl);
 }

-vuint32m1x8_t test_vlseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32(mask, base, vl);
+vuint32m1x8_t test_vlseg8e32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32(vm, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e32ff.c b/auto-generated/gnu-overloaded-tests/vlseg8e32ff.c
index 2295e0204..2e4c27128 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e32ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e32ff.c
@@ -6,28 +6,28 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff(mask, base, new_vl, vl);
+vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff(vm, rs1, new_vl, vl);
 }

-vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff(mask, base, new_vl, vl);
+vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff(vm, rs1, new_vl, vl);
 }

-vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff(mask, base, new_vl, vl);
+vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff(vm, rs1, new_vl, vl);
 }

-vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff(mask, base, new_vl, vl);
+vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff(vm, rs1, new_vl, vl);
 }

-vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff(mask, base, new_vl, vl);
+vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff(vm, rs1, new_vl, vl);
 }

-vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff(mask, base, new_vl, vl);
+vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff(vm, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32ff\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e64.c b/auto-generated/gnu-overloaded-tests/vlseg8e64.c
index 46ea87ac5..e65e11f17 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e64.c
@@ -6,16 +6,16 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, size_t vl) {
-  return __riscv_vlseg8e64(mask, base, vl);
+vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64(vm, rs1, vl);
 }

-vint64m1x8_t test_vlseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, size_t vl) {
-  return __riscv_vlseg8e64(mask, base, vl);
+vint64m1x8_t test_vlseg8e64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64(vm, rs1, vl);
 }

-vuint64m1x8_t test_vlseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg8e64(mask, base, vl);
+vuint64m1x8_t test_vlseg8e64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64(vm, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e64ff.c b/auto-generated/gnu-overloaded-tests/vlseg8e64ff.c
index 78e1e7cf2..471bae225 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e64ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e64ff.c
@@ -6,16 +6,16 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff(mask, base, new_vl, vl);
+vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff(vm, rs1, new_vl, vl);
 }

-vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff(mask, base, new_vl, vl);
+vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff(vm, rs1, new_vl, vl);
 }

-vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff(mask, base, new_vl, vl);
+vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff(vm, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64ff\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e8.c b/auto-generated/gnu-overloaded-tests/vlseg8e8.c
index 9634c33ac..950382b76 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e8.c
@@ -6,36 +6,36 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }

-vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }

-vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }

-vint8m1x8_t test_vlseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vint8m1x8_t test_vlseg8e8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }

-vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }

-vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }

-vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }

-vuint8m1x8_t test_vlseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8(mask, base, vl);
+vuint8m1x8_t test_vlseg8e8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8(vm, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlseg8e8ff.c b/auto-generated/gnu-overloaded-tests/vlseg8e8ff.c
index 16625cbd8..9870fe3fe 100644
--- a/auto-generated/gnu-overloaded-tests/vlseg8e8ff.c
+++ b/auto-generated/gnu-overloaded-tests/vlseg8e8ff.c
@@ -6,36 +6,36 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }

-vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }

-vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }

-vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }

-vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff(mask, base, new_vl, vl);
+vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff(vm, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8ff\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg2e16.c b/auto-generated/gnu-overloaded-tests/vlsseg2e16.c
index c276b5368..c171261e6 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg2e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg2e16.c
@@ -6,64 +6,64 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_m(vbool4_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vint16m1x2_t test_vlsseg2e16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vint16m2x2_t test_vlsseg2e16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vint16m4x2_t test_vlsseg2e16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16(mask, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e16\.[ivxfswum.]+\s+} 15 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg2e32.c b/auto-generated/gnu-overloaded-tests/vlsseg2e32.c
index 0a4338a09..ec07c723e 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg2e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg2e32.c
@@ -6,52 +6,52 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vint32m1x2_t test_vlsseg2e32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vint32m2x2_t test_vlsseg2e32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vint32m4x2_t test_vlsseg2e32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32(mask, base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e32\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg2e64.c b/auto-generated/gnu-overloaded-tests/vlsseg2e64.c
index 05d53f830..75c4da058 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg2e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg2e64.c
@@ -6,40 +6,40 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64(mask, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e64\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg2e8.c b/auto-generated/gnu-overloaded-tests/vlsseg2e8.c
index ea5e3cfe2..6d0ef44ec 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg2e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg2e8.c
@@ -6,52 +6,52 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8(mask, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e8\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg3e16.c b/auto-generated/gnu-overloaded-tests/vlsseg3e16.c
index 243584386..aadab77d7 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg3e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg3e16.c
@@ -6,52 +6,52 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16(mask, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e16\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg3e32.c b/auto-generated/gnu-overloaded-tests/vlsseg3e32.c
index aaab35ef4..fac1156c3 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg3e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg3e32.c
@@ -6,40 +6,40 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32(mask, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32(vm, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e32\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg3e64.c b/auto-generated/gnu-overloaded-tests/vlsseg3e64.c
index 0bbfd0575..aafabcd2d 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg3e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg3e64.c
@@ -6,28 +6,28 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(mask, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64(vm, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64(mask, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64(vm, rs1, rs2, vl);
 }

-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t
vl) { + return __riscv_vlsseg3e64(vm, rs1, rs2, vl); } -vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64(mask, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64(mask, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64(mask, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg3e8.c b/auto-generated/gnu-overloaded-tests/vlsseg3e8.c index 7fe0d8185..9e8e82690 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg3e8.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg3e8.c @@ -6,44 +6,44 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vint8m2x3_t test_vlsseg3e8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); 
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8(mask, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e8\.[ivxfswum.]+\s+} 10 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg4e16.c b/auto-generated/gnu-overloaded-tests/vlsseg4e16.c index 1b38cda84..3ca222954 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg4e16.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg4e16.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } 
-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16(mask, base, bstride, vl); +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e16\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg4e32.c b/auto-generated/gnu-overloaded-tests/vlsseg4e32.c index 530552cc8..4caa38cfc 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg4e32.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg4e32.c @@ -6,40 +6,40 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg4e32(mask, base, bstride, vl); +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e32(mask, base, bstride, vl); +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e32\.[ivxfswum.]+\s+} 9 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg4e64.c b/auto-generated/gnu-overloaded-tests/vlsseg4e64.c index a4037cfe7..4dc14a7eb 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg4e64.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg4e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(mask, base, bstride, vl); +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(mask, base, bstride, vl); +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64(vm, rs1, rs2, vl); } -vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(mask, base, bstride, vl); +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64(vm, rs1, rs2, vl); } -vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(mask, base, bstride, vl); +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { 
+ return __riscv_vlsseg4e64(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(mask, base, bstride, vl); +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e64(mask, base, bstride, vl); +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg4e8.c b/auto-generated/gnu-overloaded-tests/vlsseg4e8.c index ae20e8511..7ef237d90 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg4e8.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg4e8.c @@ -6,44 +6,44 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); 
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8(mask, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e8\.[ivxfswum.]+\s+} 10 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg5e16.c b/auto-generated/gnu-overloaded-tests/vlsseg5e16.c index d1fff3c1a..197f14bee 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg5e16.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg5e16.c @@ -6,40 +6,40 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } 
-vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16(mask, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e16\.[ivxfswum.]+\s+} 9 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg5e32.c b/auto-generated/gnu-overloaded-tests/vlsseg5e32.c index 724faccf6..47601d836 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg5e32.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg5e32.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(mask, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(mask, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(mask, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32(vm, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(mask, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(mask, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32(mask, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e32\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg5e64.c b/auto-generated/gnu-overloaded-tests/vlsseg5e64.c index 8f90d1952..6843bb1c2 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg5e64.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg5e64.c @@ -6,16 +6,16 @@ typedef _Float16 
float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64(mask, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64(vm, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64(mask, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64(mask, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e64\.[ivxfswum.]+\s+} 3 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg5e8.c b/auto-generated/gnu-overloaded-tests/vlsseg5e8.c index 300b66676..14e971af6 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg5e8.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg5e8.c @@ -6,36 +6,36 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, 
base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8(mask, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e8\.[ivxfswum.]+\s+} 8 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg6e16.c b/auto-generated/gnu-overloaded-tests/vlsseg6e16.c index 5732931d7..3cd268169 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg6e16.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg6e16.c @@ -6,40 +6,40 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg6e16(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16(mask, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e16\.[ivxfswum.]+\s+} 9 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg6e32.c b/auto-generated/gnu-overloaded-tests/vlsseg6e32.c index ea8d6ce9a..fca0d0405 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg6e32.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg6e32.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(mask, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(mask, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(mask, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32(vm, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(mask, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(mask, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32(mask, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e32\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg6e64.c b/auto-generated/gnu-overloaded-tests/vlsseg6e64.c index 39b6d6cba..65919257b 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg6e64.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg6e64.c @@ -6,16 +6,16 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(mask, base, bstride, vl); +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_m(vbool64_t vm, 
const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64(vm, rs1, rs2, vl); } -vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(mask, base, bstride, vl); +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e64(mask, base, bstride, vl); +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e64\.[ivxfswum.]+\s+} 3 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg6e8.c b/auto-generated/gnu-overloaded-tests/vlsseg6e8.c index c32fa075c..0794c17dc 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg6e8.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg6e8.c @@ -6,36 +6,36 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(mask, base, bstride, vl); +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(mask, base, bstride, vl); +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(mask, base, bstride, vl); +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } -vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(mask, base, bstride, vl); +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(mask, base, bstride, vl); +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(mask, base, bstride, vl); +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e8(mask, base, bstride, vl); +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg6e8(mask, base, bstride, vl); +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e8\.[ivxfswum.]+\s+} 8 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg7e16.c b/auto-generated/gnu-overloaded-tests/vlsseg7e16.c index 2adc49cb7..2c8dcfd2b 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg7e16.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg7e16.c @@ -6,40 +6,40 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e16(mask, base, bstride, vl); +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e16\.[ivxfswum.]+\s+} 9 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg7e32.c b/auto-generated/gnu-overloaded-tests/vlsseg7e32.c index a8a564809..aa627c275 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg7e32.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg7e32.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(mask, base, bstride, vl); +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(mask, base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(mask, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32(vm, rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(mask, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(mask, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32(mask, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e32\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg7e64.c b/auto-generated/gnu-overloaded-tests/vlsseg7e64.c index f41ca4af0..51b914c97 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg7e64.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg7e64.c @@ -6,16 +6,16 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64(mask, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64(vm, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64(mask, base, bstride, vl); +vint64m1x7_t 
test_vlsseg7e64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64(mask, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e64\.[ivxfswum.]+\s+} 3 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vlsseg7e8.c b/auto-generated/gnu-overloaded-tests/vlsseg7e8.c index 546a1b740..99d6da699 100644 --- a/auto-generated/gnu-overloaded-tests/vlsseg7e8.c +++ b/auto-generated/gnu-overloaded-tests/vlsseg7e8.c @@ -6,36 +6,36 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8(mask, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e8\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg8e16.c b/auto-generated/gnu-overloaded-tests/vlsseg8e16.c
index 7306ce6cb..0a09250b1 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg8e16.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg8e16.c
@@ -6,40 +6,40 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vint16m1x8_t test_vlsseg8e16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e16(mask, base, bstride, vl);
+vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e16(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e16\.[ivxfswum.]+\s+} 9 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg8e32.c b/auto-generated/gnu-overloaded-tests/vlsseg8e32.c
index baaa41de8..31ab51825 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg8e32.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg8e32.c
@@ -6,28 +6,28 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(mask, base, bstride, vl);
+vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e32(vm, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(mask, base, bstride, vl);
+vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e32(vm, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(mask, base, bstride, vl);
+vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e32(vm, rs1, rs2, vl);
 }
 
-vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(mask, base, bstride, vl);
+vint32m1x8_t test_vlsseg8e32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e32(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(mask, base, bstride, vl);
+vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e32(vm, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e32(mask, base, bstride, vl);
+vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e32(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e32\.[ivxfswum.]+\s+} 6 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg8e64.c b/auto-generated/gnu-overloaded-tests/vlsseg8e64.c
index e3bec959d..9ae2f8857 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg8e64.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg8e64.c
@@ -6,16 +6,16 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64(mask, base, bstride, vl);
+vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e64(vm, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64(mask, base, bstride, vl);
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e64(vm, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e64(mask, base, bstride, vl);
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e64(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e64\.[ivxfswum.]+\s+} 3 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vlsseg8e8.c b/auto-generated/gnu-overloaded-tests/vlsseg8e8.c
index d899e7394..8a1db8ec0 100644
--- a/auto-generated/gnu-overloaded-tests/vlsseg8e8.c
+++ b/auto-generated/gnu-overloaded-tests/vlsseg8e8.c
@@ -6,36 +6,36 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8(mask, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e8\.[ivxfswum.]+\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vluxei16.c b/auto-generated/gnu-overloaded-tests/vluxei16.c
index a8d74b5e0..0c6dc0e0f 100644
---
a/auto-generated/gnu-overloaded-tests/vluxei16.c +++ b/auto-generated/gnu-overloaded-tests/vluxei16.c @@ -6,460 +6,460 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei16_v_f16mf4(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4(const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4(const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8(const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8(const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4(const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4(const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8(const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8(const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxei16(base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4(const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4(const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8(const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8(const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4(const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4(const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); 
} -vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8(const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4(const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8(const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4(const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8(const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint8m1_t 
test_vluxei16_v_u8m1(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } 
-vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, 
rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, 
vl); +vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, const 
int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + 
return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - 
return __riscv_vluxei16(mask, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16(mask, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei16\.[ivxfswum.]+\s+} 114 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxei32.c b/auto-generated/gnu-overloaded-tests/vluxei32.c index 9c9223dff..350f0f3c0 100644 --- a/auto-generated/gnu-overloaded-tests/vluxei32.c +++ b/auto-generated/gnu-overloaded-tests/vluxei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei32_v_f16mf4(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4(const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4(const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4(const float32_t *base, vuint32m4_t bindex, 
size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4(const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8(const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8(const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4(const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4(const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8(const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8(const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1(const int16_t *rs1, vuint32m2_t rs2, 
size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4(const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4(const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8(const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4(const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8(const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vluxei32(base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *rs1, vuint32mf2_t rs2, size_t 
vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat32m4_t 
test_vluxei32_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t 
mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) 
{ + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return 
__riscv_vluxei32(mask, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32(mask, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxei64.c b/auto-generated/gnu-overloaded-tests/vluxei64.c index aa72b9e6f..730b99e2c 100644 --- a/auto-generated/gnu-overloaded-tests/vluxei64.c +++ b/auto-generated/gnu-overloaded-tests/vluxei64.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei64_v_f16mf4(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, 
bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4(const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4(const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4(const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4(const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8(const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8(const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *rs1, vuint64m4_t rs2, size_t 
vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4(const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4(const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8(const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxei64(base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { 
+ return __riscv_vluxei64(rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); 
+vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t 
mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t 
vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64(mask, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei64\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxei8.c b/auto-generated/gnu-overloaded-tests/vluxei8.c index 43e833b14..1219eb8b3 100644 --- a/auto-generated/gnu-overloaded-tests/vluxei8.c +++ b/auto-generated/gnu-overloaded-tests/vluxei8.c @@ -6,476 +6,476 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei8_v_f16mf4(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4(const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4(const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8(const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8(const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2(const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4(const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4(const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8(const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8(const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1(const float64_t 
*rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4(const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8(const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8(const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4(const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8(const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint16m2_t 
test_vluxei8_v_i16m2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4(const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8(const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4(const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8(const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4(const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8(const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vluxei8(base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4(const 
uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t mask, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_m(vbool2_t vm, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat32mf2_t 
test_vluxei8_v_f32mf2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t vm, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t vm, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, const int8_t *base, 
vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t vm, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t vm, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, const 
int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t vm, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t vm, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8(mask, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8(vm, rs1, rs2, vl); } -vuint8m4_t 
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t vm, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t vm, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t vm, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

-vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8(mask, base, bindex, vl);
+vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t vm, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei8\.[ivxfswum.]+\s+} 118 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vluxseg2ei16.c b/auto-generated/gnu-overloaded-tests/vluxseg2ei16.c
index 8c52f09ec..76e4ad5b0 100644
--- a/auto-generated/gnu-overloaded-tests/vluxseg2ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vluxseg2ei16.c
@@ -6,388 +6,388 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2(const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2(const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2(const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2(const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2(const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2(const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2(const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2(const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2(const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2(const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2(const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2(const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2(const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2(const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2(const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2(const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2(const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2(const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2(const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16(mask, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16(vm, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei16\.[ivxfswum.]+\s+} 96 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vluxseg2ei32.c b/auto-generated/gnu-overloaded-tests/vluxseg2ei32.c
index 8f45260d7..bda9b1c87 100644
--- a/auto-generated/gnu-overloaded-tests/vluxseg2ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vluxseg2ei32.c
@@ -6,372 +6,372 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2(const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2(const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2(const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2(const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2(const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2(const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2(const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2(const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei32_v_i8m1x2(const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei32_v_i8m2x2(const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2(const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei32_v_i16m1x2(const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei32_v_i16m2x2(const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei32_v_i16m4x2(const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei32_v_i32m1x2(const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei32_v_i32m2x2(const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei32_v_i32m4x2(const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei32_v_i64m1x2(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei32_v_i64m2x2(const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei32_v_i64m4x2(const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2(const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2(const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2(const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32(mask, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
test_vluxseg2ei32_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t mask, const uint64_t 
*base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32(mask, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei32\.[ivxfswum.]+\s+} 92 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg2ei64.c b/auto-generated/gnu-overloaded-tests/vluxseg2ei64.c index 320a5d7e0..bab15861a 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg2ei64.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg2ei64.c @@ -6,332 +6,332 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2(const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const float64_t *base, vuint64m1_t bindex, size_t 
vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2(const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2(const int32_t *rs1, 
vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2(const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2(const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2(const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint16m2x2_t 
test_vluxseg2ei64_v_u16m2x2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2(const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2(const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_m(vbool8_t vm, const 
float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei64(mask, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t mask, const 
int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64(mask, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei64\.[ivxfswum.]+\s+} 82 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg2ei8.c b/auto-generated/gnu-overloaded-tests/vluxseg2ei8.c index aadd81d3e..b5d6ecb5d 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg2ei8.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg2ei8.c @@ -6,388 +6,388 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2(const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat16m4x2_t 
test_vluxseg2ei8_v_f16m4x2(const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2(const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2(const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint8m4x2_t 
test_vluxseg2ei8_v_i8m4x2(const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2(const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2(const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2(const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2(const int64_t *rs1, 
vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2(const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2(const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *base, vuint8mf4_t bindex, size_t 
vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2(const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2(const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_m(vbool4_t vm, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) 
{ + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_m(vbool8_t vm, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_m(vbool16_t vm, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_m(vbool4_t vm, const int8_t *rs1, 
vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_m(vbool2_t vm, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_m(vbool4_t vm, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_m(vbool8_t vm, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_m(vbool64_t vm, const int64_t *rs1, 
vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_m(vbool16_t vm, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_m(vbool2_t vm, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_m(vbool16_t vm, 
const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_m(vbool4_t vm, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_m(vbool8_t vm, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8(mask, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_m(vbool16_t vm, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei8\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg3ei16.c b/auto-generated/gnu-overloaded-tests/vluxseg3ei16.c index 80cd2f38c..d393f92e3 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg3ei16.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg3ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double 
float64_t; -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3(const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint8m1x3_t 
test_vluxseg3ei16_v_i8m1x3(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint8m1x3_t test_vluxseg3ei16_v_i8m1x3(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint8m2x3_t test_vluxseg3ei16_v_i8m2x3(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint16m1x3_t test_vluxseg3ei16_v_i16m1x3(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint16m2x3_t test_vluxseg3ei16_v_i16m2x3(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint32m1x3_t test_vluxseg3ei16_v_i32m1x3(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint32m2x3_t test_vluxseg3ei16_v_i32m2x3(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint64m1x3_t test_vluxseg3ei16_v_i64m1x3(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vint64m2x3_t test_vluxseg3ei16_v_i64m2x3(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, 
vl); +vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3(const uint64_t *rs1, vuint16mf2_t rs2, 
size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - 
return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t mask, 
const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16(mask, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg3ei32.c b/auto-generated/gnu-overloaded-tests/vluxseg3ei32.c index b500ca344..da7fb5d52 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg3ei32.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg3ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, 
bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + 
return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint16mf4x3_t 
test_vluxseg3ei32_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t 
vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } 
-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32(mask, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg3ei64.c b/auto-generated/gnu-overloaded-tests/vluxseg3ei64.c index 41b8d2211..55cc57d44 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg3ei64.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg3ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t 
*base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3(const int64_t 
*rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vuint64m2x3_t 
test_vluxseg3ei64_v_u64m2x3(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, 
vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint64m1x3_t 
test_vluxseg3ei64_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint64m2_t 
bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64(mask, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg3ei8.c b/auto-generated/gnu-overloaded-tests/vluxseg3ei8.c index 7de861833..86d162f60 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg3ei8.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg3ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float32_t *base, 
vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei8(rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, 
vl); +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, 
rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + 
return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8(mask, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg4ei16.c b/auto-generated/gnu-overloaded-tests/vluxseg4ei16.c index 00774180f..f8b6cb4c7 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg4ei16.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg4ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t 
test_vluxseg4ei16_v_f16mf4x4(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4(const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4(const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4(const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *base, 
vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4(const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4(const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4(const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4(const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint8mf4x4_t 
test_vluxseg4ei16_v_u8mf4x4(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4(const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4(const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4(const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4(const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei16(rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg4ei16(mask, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t mask, const 
int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16(mask, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg4ei32.c b/auto-generated/gnu-overloaded-tests/vluxseg4ei32.c index 0b9e51535..711c5fe3d 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg4ei32.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg4ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4(const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, 
bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4(const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4(const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4(const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + 
return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint16m2x4_t test_vluxseg4ei32_v_i16m2x4(const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4(const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4(const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4(const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *base, vuint32m1_t 
bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4(const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4(const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4(const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint16mf4x4_t 
test_vluxseg4ei32_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t 
vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } 
-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32(mask, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg4ei64.c b/auto-generated/gnu-overloaded-tests/vluxseg4ei64.c index b8b6a2d47..5ae7120ec 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg4ei64.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg4ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4(const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4(const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4(const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t 
*base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4(const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4(const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4(const int64_t 
*rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4(const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4(const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vuint64m2x4_t 
test_vluxseg4ei64_v_u64m2x4(const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4(const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, 
vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint64m1x4_t 
test_vluxseg4ei64_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint64m2_t 
bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64(mask, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg4ei8.c b/auto-generated/gnu-overloaded-tests/vluxseg4ei8.c index 5f625f714..33cfe581f 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg4ei8.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg4ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4(const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float32_t *base, 
vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4(const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4(const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4(const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4(const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei8(rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4(const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4(const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4(const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, 
vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4(const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4(const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4(const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_m(vbool8_t vm, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, 
rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_m(vbool16_t vm, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_m(vbool32_t vm, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_m(vbool4_t vm, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_m(vbool8_t vm, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_m(vbool16_t vm, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_m(vbool32_t vm, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_m(vbool4_t vm, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + 
return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_m(vbool8_t vm, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_m(vbool16_t vm, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8(mask, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_m(vbool32_t vm, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg5ei16.c b/auto-generated/gnu-overloaded-tests/vluxseg5ei16.c index 1a33751bb..651994af8 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg5ei16.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg5ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t 
test_vluxseg5ei16_v_f16mf4x5(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *base, vuint16m1_t bindex, size_t 
vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vint64m1x5_t test_vluxseg5ei16_v_i64m1x5(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint32m1x5_t 
test_vluxseg5ei16_v_u32m1x5(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - 
return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint16mf4x5_t 
test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16(mask, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg5ei32.c b/auto-generated/gnu-overloaded-tests/vluxseg5ei32.c index 225b14015..996943a68 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg5ei32.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg5ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, 
vl); +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) 
{ + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint16m1x5_t 
test_vluxseg5ei32_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, 
vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32(mask, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg5ei64.c b/auto-generated/gnu-overloaded-tests/vluxseg5ei64.c index 65b37f741..eeea7e664 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg5ei64.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg5ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *base, 
vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5(const uint8_t 
*rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vfloat64m1x5_t 
test_vluxseg5ei64_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + 
return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64(mask, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git 
a/auto-generated/gnu-overloaded-tests/vluxseg5ei8.c b/auto-generated/gnu-overloaded-tests/vluxseg5ei8.c index 6b15584c5..4b3f890e8 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg5ei8.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg5ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *base, vuint8mf4_t 
bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) 
{ + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8(vm, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8(mask, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) 
{
+  return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei8(mask, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei8(mask, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei8(mask, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei8(mask, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei8(mask, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei8(mask, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vluxseg6ei16.c b/auto-generated/gnu-overloaded-tests/vluxseg6ei16.c
index e840464b7..1b24e67fc 100644
--- a/auto-generated/gnu-overloaded-tests/vluxseg6ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vluxseg6ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16(base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16(base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16(base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6(const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16(rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return
__riscv_vluxseg6ei16(base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6(const int64_t *rs1, 
vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_m(vbool32_t vm, 
const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei16(mask, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16(mask, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); } -vuint32mf2x6_t 
test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16(mask, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16(mask, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16(vm, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16(mask, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vluxseg6ei32.c b/auto-generated/gnu-overloaded-tests/vluxseg6ei32.c
index d8c80a453..2bf32887c 100644
--- a/auto-generated/gnu-overloaded-tests/vluxseg6ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vluxseg6ei32.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32(base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32(base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6(const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32(base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6(const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32(base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32(base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6(const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32(rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32(base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32(rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32(base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32(rs1,
rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei32(base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t 
rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vint64m1x6_t 
test_vluxseg6ei32_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32(mask, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vluxseg6ei64.c b/auto-generated/gnu-overloaded-tests/vluxseg6ei64.c
index f04466b3d..8c85ae73b 100644
--- a/auto-generated/gnu-overloaded-tests/vluxseg6ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vluxseg6ei64.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6(const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6(const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6(const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6(const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6(const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6(const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6(const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6(const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6(const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei64_v_i8m1x6(const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6(const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(base, bindex, vl);
+vint16mf4x6_t
test_vluxseg6ei64_v_i16mf4x6(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, 
rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, 
vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64(mask, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t 
rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(mask, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(mask, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(mask, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(mask, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(mask, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(mask, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64(mask, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vluxseg6ei8.c b/auto-generated/gnu-overloaded-tests/vluxseg6ei8.c
index e19b0463a..ff4f4b2fd 100644
--- a/auto-generated/gnu-overloaded-tests/vluxseg6ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vluxseg6ei8.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8(base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8(rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8(base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8(rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const
float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6(const int32_t *rs1, 
vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vfloat16mf2x6_t 
test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_m(vbool16_t vm, const uint16_t *rs1, 
vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8(mask, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg7ei16.c b/auto-generated/gnu-overloaded-tests/vluxseg7ei16.c index b39461c7e..2b40d7e22 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg7ei16.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg7ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint8mf8x7_t 
test_vluxseg7ei16_v_i8mf8x7(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); 
} -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, 
base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t mask, const 
int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16(mask, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg7ei32.c b/auto-generated/gnu-overloaded-tests/vluxseg7ei32.c index a2d118ba4..0059fa167 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg7ei32.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg7ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *base, vuint32mf2_t bindex, size_t 
vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7(const 
uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_m(vbool32_t vm, 
const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, 
bindex, vl); +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32(mask, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg7ei64.c b/auto-generated/gnu-overloaded-tests/vluxseg7ei64.c index 3c98cd665..696123df1 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg7ei64.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg7ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7(const float16_t *rs1, 
vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint32m1x7_t 
test_vluxseg7ei64_v_i32m1x7(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { 
- return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint16mf2x7_t 
test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, 
size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64(mask, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg7ei8.c b/auto-generated/gnu-overloaded-tests/vluxseg7ei8.c index a2720d76e..f1a11feb2 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg7ei8.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg7ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vfloat64m1x7_t 
test_vluxseg7ei8_v_f64m1x7(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *base, 
vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } 
-vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_m(vbool32_t vm, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8(mask, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_m(vbool64_t vm, const uint64_t 
*rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg8ei16.c b/auto-generated/gnu-overloaded-tests/vluxseg8ei16.c index fe7c75c41..5b5336dd3 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg8ei16.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg8ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8(const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8(const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8(const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8(const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8(const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8(const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8(const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8(const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8(const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8(const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint16mf4x8_t 
test_vluxseg8ei16_v_i16mf4x8(const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8(const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8(const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8(const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8(const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8(const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8(const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8(const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8(const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8(const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8(const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8(const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8(const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei16(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8(const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8(const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8(const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8(const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei16(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint8mf2x8_t 
test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg8ei32.c b/auto-generated/gnu-overloaded-tests/vluxseg8ei32.c index 8c33853e3..e7edb1960 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg8ei32.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg8ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8(const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const float16_t *base, vuint32m1_t 
bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8(const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8(const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8(const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8(const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8(const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8(const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8(const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8(const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8(const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8(const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8(const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8(const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8(const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint32mf2x8_t 
test_vluxseg8ei32_v_i32mf2x8(const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8(const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8(const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8(const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8(const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8(const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8(const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8(const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8(const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8(const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8(const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8(const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8(const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei32(rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); 
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, 
vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg8ei64.c b/auto-generated/gnu-overloaded-tests/vluxseg8ei64.c index 4c88c2ba3..6fe4b2681 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg8ei64.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg8ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8(const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8(const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8(const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8(const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8(const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei64(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8(const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8(const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8(const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8(const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8(const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8(const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8(const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8(const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8(const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8(const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8(const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8(const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *base, vuint64m2_t bindex, size_t vl) { - 
return __riscv_vluxseg8ei64(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8(const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8(const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8(const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8(const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8(const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8(const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8(const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8(const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8(const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vfloat32mf2x8_t 
test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint64m1_t rs2, 
size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); 
+vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vluxseg8ei8.c b/auto-generated/gnu-overloaded-tests/vluxseg8ei8.c index 51bf70133..7937ac852 100644 --- a/auto-generated/gnu-overloaded-tests/vluxseg8ei8.c +++ b/auto-generated/gnu-overloaded-tests/vluxseg8ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8(const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8(const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8(const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8(const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8(const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8(const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8(const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8(const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8(const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei8(rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8(const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8(const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8(const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8(const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8(const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8(const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8(const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8(const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8(const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8(const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8(const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8(const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, 
vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8(const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8(const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8(const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8(const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8(const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_m(vbool64_t vm, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_m(vbool32_t vm, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_m(vbool16_t vm, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_m(vbool64_t vm, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_m(vbool32_t vm, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_m(vbool64_t vm, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_m(vbool64_t vm, const int8_t *rs1, vuint8mf8_t rs2, 
size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_m(vbool32_t vm, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_m(vbool16_t vm, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_m(vbool8_t vm, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_m(vbool64_t vm, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_m(vbool32_t vm, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_m(vbool16_t vm, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_m(vbool64_t vm, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_m(vbool32_t vm, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_m(vbool64_t vm, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_m(vbool64_t vm, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_m(vbool32_t vm, const uint8_t *rs1, 
vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_m(vbool16_t vm, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_m(vbool8_t vm, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_m(vbool64_t vm, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_m(vbool32_t vm, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_m(vbool16_t vm, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_m(vbool64_t vm, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_m(vbool32_t vm, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8(mask, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_m(vbool64_t vm, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8(vm, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmacc.c b/auto-generated/gnu-overloaded-tests/vmacc.c index 391b60ac6..0e4d9eaed 100644 --- a/auto-generated/gnu-overloaded-tests/vmacc.c +++ b/auto-generated/gnu-overloaded-tests/vmacc.c @@ -358,356 +358,356 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s return __riscv_vmacc(vd, rs1, vs2, vl); } -vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, 
vl); } -vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t vm, vint8m8_t 
vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint16m8_t 
test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, 
vd, rs1, vs2, vl); +vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, 
size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, 
vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t 
vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, 
vd, vs1, vs2, vl); +vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmadc.c b/auto-generated/gnu-overloaded-tests/vmadc.c index d1b067ebe..12f04cb73 100644 --- a/auto-generated/gnu-overloaded-tests/vmadc.c +++ b/auto-generated/gnu-overloaded-tests/vmadc.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t vs2, 
vint8mf8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t vs2, vint8m2_t 
vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) 
{ + return __riscv_vmadc(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t 
test_vmadc_vvm_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, 
vint32m1_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, 
vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + 
return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t 
test_vmadc_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); 
+vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vmadc(vs2, rs1, v0, vl);
 }
 
-vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmadc(op1, op2, vl);
+vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmadc(vs2, vs1, vl);
 }
 
-vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmadc(op1, op2, vl);
+vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmadc(vs2, rs1, vl);
 }
 
-vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vmadc(op1, op2, carryin, vl);
+vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmadc(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vmadc(op1, op2, carryin, vl);
+vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmadc(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmadc(op1, op2, vl);
+vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmadc(vs2, vs1, vl);
 }
 
-vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmadc(op1, op2, vl);
+vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmadc(vs2, rs1, vl);
 }
 
-vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vmadc(op1, op2, carryin, vl);
+vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmadc(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vmadc(op1, op2, carryin, vl);
+vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmadc(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmadc(op1, op2, vl);
+vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmadc(vs2, vs1, vl);
 }
 
-vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmadc(op1, op2, vl);
+vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmadc(vs2, rs1, vl);
 }
 
-vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vmadc(op1, op2, carryin, vl);
+vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmadc(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vmadc(op1, op2, carryin, vl);
+vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmadc(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmadc(op1, op2, vl);
+vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmadc(vs2, vs1, vl);
 }
-vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t 
vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t 
test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) 
{ + return __riscv_vmadc(vs2, rs1, vl); } -vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } -vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, vs1, v0, vl); } -vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vmadc(op1, op2, carryin, vl); +vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmadc(vs2, rs1, v0, vl); } -vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmadc(vs2, vs1, vl); } -vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmadc(op1, op2, vl); +vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmadc(vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmadc\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmadd.c b/auto-generated/gnu-overloaded-tests/vmadd.c index 3f877cfb6..daf6c019d 100644 --- a/auto-generated/gnu-overloaded-tests/vmadd.c +++ b/auto-generated/gnu-overloaded-tests/vmadd.c @@ -358,356 +358,356 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, s return __riscv_vmadd(vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint8mf4_t 
test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, 
vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint32mf2_t 
test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return 
__riscv_vmadd(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, 
vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint16mf2_t 
test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint32mf2_t 
test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t 
vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmand.c b/auto-generated/gnu-overloaded-tests/vmand.c index bc0bc6e80..51429a6ad 100644 --- a/auto-generated/gnu-overloaded-tests/vmand.c +++ b/auto-generated/gnu-overloaded-tests/vmand.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmand(op1, op2, vl); +vbool1_t test_vmand_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmand(vs2, vs1, vl); } -vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmand(op1, op2, vl); +vbool2_t test_vmand_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmand(vs2, vs1, vl); } -vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmand(op1, op2, vl); +vbool4_t test_vmand_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmand(vs2, vs1, vl); } -vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmand(op1, op2, vl); +vbool8_t test_vmand_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmand(vs2, vs1, vl); } -vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmand(op1, op2, vl); +vbool16_t test_vmand_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmand(vs2, vs1, vl); } -vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmand(op1, op2, vl); +vbool32_t test_vmand_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmand(vs2, vs1, 
vl); } -vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmand(op1, op2, vl); +vbool64_t test_vmand_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmand(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmand\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmandn.c b/auto-generated/gnu-overloaded-tests/vmandn.c index ddbf08c92..89bd535f2 100644 --- a/auto-generated/gnu-overloaded-tests/vmandn.c +++ b/auto-generated/gnu-overloaded-tests/vmandn.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmandn(op1, op2, vl); +vbool1_t test_vmandn_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmandn(vs2, vs1, vl); } -vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmandn(op1, op2, vl); +vbool2_t test_vmandn_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmandn(vs2, vs1, vl); } -vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmandn(op1, op2, vl); +vbool4_t test_vmandn_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmandn(vs2, vs1, vl); } -vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmandn(op1, op2, vl); +vbool8_t test_vmandn_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmandn(vs2, vs1, vl); } -vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmandn(op1, op2, vl); +vbool16_t test_vmandn_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmandn(vs2, vs1, vl); } -vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmandn(op1, op2, vl); +vbool32_t test_vmandn_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmandn(vs2, vs1, vl); } -vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmandn(op1, op2, vl); +vbool64_t test_vmandn_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmandn(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmandn\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmax.c b/auto-generated/gnu-overloaded-tests/vmax.c index cb9bc7596..5186ddc5e 100644 --- a/auto-generated/gnu-overloaded-tests/vmax.c +++ b/auto-generated/gnu-overloaded-tests/vmax.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8mf4_t 
test_vmax_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); 
+vint16m1_t test_vmax_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, 
int32_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m4_t test_vmax_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmax(vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vmax(vm, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t 
op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, 
vl); +vint32m4_t test_vmax_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m4_t test_vmax_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmax(vm, vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmax(mask, op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmax\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmaxu.c b/auto-generated/gnu-overloaded-tests/vmaxu.c index 7b150634c..5c9e8a224 100644 --- a/auto-generated/gnu-overloaded-tests/vmaxu.c +++ b/auto-generated/gnu-overloaded-tests/vmaxu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t 
op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16mf4_t 
test_vmaxu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m2_t 
test_vmaxu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu(vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vmaxu(mask, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return 
__riscv_vmaxu(mask, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } 
-vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, 
uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu(mask, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmaxu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmerge.c b/auto-generated/gnu-overloaded-tests/vmerge.c index 34bcedac4..6b1d8b6a2 100644 --- a/auto-generated/gnu-overloaded-tests/vmerge.c +++ b/auto-generated/gnu-overloaded-tests/vmerge.c @@ -6,416 +6,416 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, 
int8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, 
vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint32m4_t 
test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, 
vl); } -vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return 
__riscv_vmerge(vs2, vs1, v0, vl); } -vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, 
op2, mask, vl); +vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint64m4_t 
test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, rs1, v0, vl); } -vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) { - return 
__riscv_vmerge(op1, op2, mask, vl); +vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } -vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge(op1, op2, mask, vl); +vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); } /* { dg-final { scan-assembler-times {vmerge\.[ivxfswum.]+\s+} 103 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmfeq.c b/auto-generated/gnu-overloaded-tests/vmfeq.c index d9c274c1e..b54532e1e 100644 --- a/auto-generated/gnu-overloaded-tests/vmfeq.c +++ b/auto-generated/gnu-overloaded-tests/vmfeq.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool64_t test_vmfeq_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool64_t test_vmfeq_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool32_t test_vmfeq_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool32_t test_vmfeq_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, 
vl); } -vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t 
vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfeq(vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(op1, op2, vl); +vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool32_t 
test_vmfeq_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } 
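/* Editorial aside, not part of the auto-generated patch: with the renamed
   parameters, the overloaded masked call reads exactly like the instruction
   it lowers to, vmfeq.vv vd, vs2, vs1, v0.t.  A minimal usage sketch under
   that assumption; the helper name eq_active_f32m1 is hypothetical. */
#include <riscv_vector.h>
#include <stddef.h>

/* Compare vs2 against vs1, restricted to the lanes active in vm. */
static inline vbool32_t eq_active_f32m1(vbool32_t vm, vfloat32m1_t vs2,
                                        vfloat32m1_t vs1, size_t vl) {
  return __riscv_vmfeq(vm, vs2, vs1, vl);  /* masked vmfeq.vv */
}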
-vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, 
op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfeq(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfeq(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfeq\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmfge.c b/auto-generated/gnu-overloaded-tests/vmfge.c index 7e664b02a..d42a310c5 100644 --- a/auto-generated/gnu-overloaded-tests/vmfge.c +++ b/auto-generated/gnu-overloaded-tests/vmfge.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + 
return __riscv_vmfge(vs2, vs1, vl); } -vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool64_t test_vmfge_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool32_t test_vmfge_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool16_t test_vmfge_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool8_t test_vmfge_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool4_t test_vmfge_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool64_t test_vmfge_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool32_t test_vmfge_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); 
+vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool16_t test_vmfge_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool8_t test_vmfge_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfge(vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(op1, op2, vl); +vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool64_t test_vmfge_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool64_t test_vmfge_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool32_t test_vmfge_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool4_t 
test_vmfge_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool64_t test_vmfge_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } 
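/* Editorial aside, not part of the auto-generated patch: the vboolN_t mask
   types in these signatures encode the SEW/LMUL ratio, so vfloat16mf4_t
   (16 / (1/4) = 64) pairs with vbool64_t and vfloat64m8_t (64 / 8 = 8) pairs
   with vbool8_t.  A minimal sketch; the helper name ge_scalar_f16mf4 is
   hypothetical. */
#include <riscv_vector.h>
#include <stddef.h>

typedef _Float16 float16_t;  /* same convention as these test files */

/* One mask bit per element: set where a lane of vs2 is >= the scalar rs1. */
static inline vbool64_t ge_scalar_f16mf4(vfloat16mf4_t vs2, float16_t rs1,
                                         size_t vl) {
  return __riscv_vmfge(vs2, rs1, vl);  /* vmfge.vf */
}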
-vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfge(vm, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfge(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfge(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfge\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmfgt.c b/auto-generated/gnu-overloaded-tests/vmfgt.c index 1f3d1be2d..cf805c5d2 100644 --- a/auto-generated/gnu-overloaded-tests/vmfgt.c +++ b/auto-generated/gnu-overloaded-tests/vmfgt.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool64_t test_vmfgt_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool64_t test_vmfgt_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } 
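/* Editorial aside, not part of the auto-generated patch: a compare mask
   feeds naturally into the merge intrinsics renamed earlier in this patch.
   A minimal clamp sketch, assuming the overloaded __riscv_vfmerge takes
   (vs2, rs1, v0, vl) and selects rs1 where the v0 bit is set (vfmerge.vfm);
   the helper name clamp_above_f32m1 is hypothetical. */
#include <riscv_vector.h>
#include <stddef.h>

/* Replace every lane of vs2 that exceeds the scalar rs1 with rs1 itself. */
static inline vfloat32m1_t clamp_above_f32m1(vfloat32m1_t vs2, float rs1,
                                             size_t vl) {
  vbool32_t v0 = __riscv_vmfgt(vs2, rs1, vl);  /* vmfgt.vf */
  return __riscv_vfmerge(vs2, rs1, v0, vl);    /* vfmerge.vfm */
}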
-vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool32_t test_vmfgt_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool32_t test_vmfgt_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool16_t test_vmfgt_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool16_t test_vmfgt_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool8_t test_vmfgt_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool8_t test_vmfgt_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool4_t test_vmfgt_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool4_t test_vmfgt_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool2_t test_vmfgt_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool2_t test_vmfgt_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool64_t test_vmfgt_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool32_t test_vmfgt_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool16_t 
test_vmfgt_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool8_t test_vmfgt_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool4_t test_vmfgt_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool64_t test_vmfgt_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool32_t test_vmfgt_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool16_t test_vmfgt_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool8_t test_vmfgt_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfgt(vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(op1, op2, vl); +vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt(vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool64_t test_vmfgt_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool64_t 
test_vmfgt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool64_t test_vmfgt_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool64_t test_vmfgt_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, 
op2, vl); +vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return 
__riscv_vmfgt(vm, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfgt(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfgt(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfgt\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmfle.c b/auto-generated/gnu-overloaded-tests/vmfle.c index a6a8feb3d..dfd48f606 100644 --- a/auto-generated/gnu-overloaded-tests/vmfle.c +++ b/auto-generated/gnu-overloaded-tests/vmfle.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool64_t test_vmfle_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool64_t test_vmfle_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool32_t test_vmfle_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool32_t test_vmfle_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool16_t test_vmfle_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool16_t test_vmfle_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool8_t test_vmfle_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool8_t test_vmfle_vf_f16m2_b8(vfloat16m2_t vs2, 
float16_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool4_t test_vmfle_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool4_t test_vmfle_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool2_t test_vmfle_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool2_t test_vmfle_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return 
__riscv_vmfle(op1, op2, vl); +vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfle(vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(op1, op2, vl); +vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vmfle(vm, vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - 
return __riscv_vmfle(mask, op1, op2, vl); +vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfle(vm, vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle(mask, op1, op2, vl); +vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfle\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmflt.c 
b/auto-generated/gnu-overloaded-tests/vmflt.c index e75872668..4e1bf2059 100644 --- a/auto-generated/gnu-overloaded-tests/vmflt.c +++ b/auto-generated/gnu-overloaded-tests/vmflt.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(op1, 
op2, vl); +vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool8_t 
test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmflt(vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(op1, op2, vl); +vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, 
vfloat16m8_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, 
vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmflt(vm, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt(mask, op1, op2, vl); +vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmflt\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmfne.c b/auto-generated/gnu-overloaded-tests/vmfne.c index 335fd4770..965591328 100644 --- a/auto-generated/gnu-overloaded-tests/vmfne.c +++ b/auto-generated/gnu-overloaded-tests/vmfne.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool16_t 
test_vmfne_vv_f16m1_b16(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, 
float32_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfne(vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(op1, op2, vl); +vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t vm, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vfloat16mf2_t op1, 
float16_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t vm, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t vm, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t vm, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t vm, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t vm, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t vm, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t vm, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t vm, 
vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t vm, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t vm, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t vm, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t vm, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t vm, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t vm, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t 
mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfne(vm, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne(mask, op1, op2, vl); +vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t vm, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfne\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmin.c b/auto-generated/gnu-overloaded-tests/vmin.c index 72dc6f2a1..cc170a668 100644 --- a/auto-generated/gnu-overloaded-tests/vmin.c +++ b/auto-generated/gnu-overloaded-tests/vmin.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m4_t 
test_vmin_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m8_t test_vmin_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t 
op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmin(vs2, vs1, vl); } 
-vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m8_t 
test_vmin_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, 
int16_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return 
__riscv_vmin(vm, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmin(vm, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmin(mask, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmin\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vminu.c b/auto-generated/gnu-overloaded-tests/vminu.c index 41721d023..a35c311a9 100644 --- a/auto-generated/gnu-overloaded-tests/vminu.c +++ b/auto-generated/gnu-overloaded-tests/vminu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m1_t 
test_vminu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t 
vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { 
+ return __riscv_vminu(vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu(vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu(vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m1_t 
test_vminu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return 
__riscv_vminu(mask, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint32m4_t 
test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu(vm, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu(mask, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vminu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmmv.c b/auto-generated/gnu-overloaded-tests/vmmv.c index 2596d8b4f..575acc69f 100644 --- a/auto-generated/gnu-overloaded-tests/vmmv.c +++ b/auto-generated/gnu-overloaded-tests/vmmv.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmmv_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmmv(op1, vl); +vbool1_t 
test_vmmv_m_b1(vbool1_t vs, size_t vl) { + return __riscv_vmmv(vs, vl); } -vbool2_t test_vmmv_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmmv(op1, vl); +vbool2_t test_vmmv_m_b2(vbool2_t vs, size_t vl) { + return __riscv_vmmv(vs, vl); } -vbool4_t test_vmmv_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vmmv(op1, vl); +vbool4_t test_vmmv_m_b4(vbool4_t vs, size_t vl) { + return __riscv_vmmv(vs, vl); } -vbool8_t test_vmmv_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmmv(op1, vl); +vbool8_t test_vmmv_m_b8(vbool8_t vs, size_t vl) { + return __riscv_vmmv(vs, vl); } -vbool16_t test_vmmv_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmmv(op1, vl); +vbool16_t test_vmmv_m_b16(vbool16_t vs, size_t vl) { + return __riscv_vmmv(vs, vl); } -vbool32_t test_vmmv_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmmv(op1, vl); +vbool32_t test_vmmv_m_b32(vbool32_t vs, size_t vl) { + return __riscv_vmmv(vs, vl); } -vbool64_t test_vmmv_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmmv(op1, vl); +vbool64_t test_vmmv_m_b64(vbool64_t vs, size_t vl) { + return __riscv_vmmv(vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmmv\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmnand.c b/auto-generated/gnu-overloaded-tests/vmnand.c index 685917d12..7376ff888 100644 --- a/auto-generated/gnu-overloaded-tests/vmnand.c +++ b/auto-generated/gnu-overloaded-tests/vmnand.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmnand(op1, op2, vl); +vbool1_t test_vmnand_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmnand(vs2, vs1, vl); } -vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmnand(op1, op2, vl); +vbool2_t test_vmnand_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmnand(vs2, vs1, vl); } -vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmnand(op1, op2, vl); +vbool4_t test_vmnand_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmnand(vs2, vs1, vl); } -vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmnand(op1, op2, vl); +vbool8_t test_vmnand_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmnand(vs2, vs1, vl); } -vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmnand(op1, op2, vl); +vbool16_t test_vmnand_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmnand(vs2, vs1, vl); } -vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmnand(op1, op2, vl); +vbool32_t test_vmnand_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmnand(vs2, vs1, vl); } -vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmnand(op1, op2, vl); +vbool64_t test_vmnand_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmnand(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmnand\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmnor.c b/auto-generated/gnu-overloaded-tests/vmnor.c index 44a0b3310..411b8967f 100644 --- a/auto-generated/gnu-overloaded-tests/vmnor.c +++ b/auto-generated/gnu-overloaded-tests/vmnor.c @@ -6,32 +6,32 @@ typedef 
_Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmnor(op1, op2, vl); +vbool1_t test_vmnor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmnor(vs2, vs1, vl); } -vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmnor(op1, op2, vl); +vbool2_t test_vmnor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmnor(vs2, vs1, vl); } -vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmnor(op1, op2, vl); +vbool4_t test_vmnor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmnor(vs2, vs1, vl); } -vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmnor(op1, op2, vl); +vbool8_t test_vmnor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmnor(vs2, vs1, vl); } -vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmnor(op1, op2, vl); +vbool16_t test_vmnor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmnor(vs2, vs1, vl); } -vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmnor(op1, op2, vl); +vbool32_t test_vmnor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmnor(vs2, vs1, vl); } -vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmnor(op1, op2, vl); +vbool64_t test_vmnor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmnor(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmnor\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmnot.c b/auto-generated/gnu-overloaded-tests/vmnot.c index 13e5fc5ba..877ddeff1 100644 --- a/auto-generated/gnu-overloaded-tests/vmnot.c +++ b/auto-generated/gnu-overloaded-tests/vmnot.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmnot_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmnot(op1, vl); +vbool1_t test_vmnot_m_b1(vbool1_t vs, size_t vl) { + return __riscv_vmnot(vs, vl); } -vbool2_t test_vmnot_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmnot(op1, vl); +vbool2_t test_vmnot_m_b2(vbool2_t vs, size_t vl) { + return __riscv_vmnot(vs, vl); } -vbool4_t test_vmnot_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vmnot(op1, vl); +vbool4_t test_vmnot_m_b4(vbool4_t vs, size_t vl) { + return __riscv_vmnot(vs, vl); } -vbool8_t test_vmnot_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmnot(op1, vl); +vbool8_t test_vmnot_m_b8(vbool8_t vs, size_t vl) { + return __riscv_vmnot(vs, vl); } -vbool16_t test_vmnot_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmnot(op1, vl); +vbool16_t test_vmnot_m_b16(vbool16_t vs, size_t vl) { + return __riscv_vmnot(vs, vl); } -vbool32_t test_vmnot_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmnot(op1, vl); +vbool32_t test_vmnot_m_b32(vbool32_t vs, size_t vl) { + return __riscv_vmnot(vs, vl); } -vbool64_t test_vmnot_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmnot(op1, vl); +vbool64_t test_vmnot_m_b64(vbool64_t vs, size_t vl) { + return __riscv_vmnot(vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmnot\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmor.c b/auto-generated/gnu-overloaded-tests/vmor.c 
index 95e4e4d81..72cff6799 100644
--- a/auto-generated/gnu-overloaded-tests/vmor.c
+++ b/auto-generated/gnu-overloaded-tests/vmor.c
@@ -6,32 +6,32 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
-  return __riscv_vmor(op1, op2, vl);
+vbool1_t test_vmor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) {
+  return __riscv_vmor(vs2, vs1, vl);
 }
 
-vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
-  return __riscv_vmor(op1, op2, vl);
+vbool2_t test_vmor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vmor(vs2, vs1, vl);
 }
 
-vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
-  return __riscv_vmor(op1, op2, vl);
+vbool4_t test_vmor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vmor(vs2, vs1, vl);
 }
 
-vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
-  return __riscv_vmor(op1, op2, vl);
+vbool8_t test_vmor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vmor(vs2, vs1, vl);
 }
 
-vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
-  return __riscv_vmor(op1, op2, vl);
+vbool16_t test_vmor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vmor(vs2, vs1, vl);
 }
 
-vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
-  return __riscv_vmor(op1, op2, vl);
+vbool32_t test_vmor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vmor(vs2, vs1, vl);
 }
 
-vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
-  return __riscv_vmor(op1, op2, vl);
+vbool64_t test_vmor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vmor(vs2, vs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmor\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vmorn.c b/auto-generated/gnu-overloaded-tests/vmorn.c
index 42d74293f..c4eb4c8ae 100644
--- a/auto-generated/gnu-overloaded-tests/vmorn.c
+++ b/auto-generated/gnu-overloaded-tests/vmorn.c
@@ -6,32 +6,32 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
-  return __riscv_vmorn(op1, op2, vl);
+vbool1_t test_vmorn_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) {
+  return __riscv_vmorn(vs2, vs1, vl);
 }
 
-vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
-  return __riscv_vmorn(op1, op2, vl);
+vbool2_t test_vmorn_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vmorn(vs2, vs1, vl);
 }
 
-vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
-  return __riscv_vmorn(op1, op2, vl);
+vbool4_t test_vmorn_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vmorn(vs2, vs1, vl);
 }
 
-vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
-  return __riscv_vmorn(op1, op2, vl);
+vbool8_t test_vmorn_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vmorn(vs2, vs1, vl);
 }
 
-vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
-  return __riscv_vmorn(op1, op2, vl);
+vbool16_t test_vmorn_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vmorn(vs2, vs1, vl);
 }
 
-vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
-  return __riscv_vmorn(op1, op2, vl);
+vbool32_t test_vmorn_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vmorn(vs2, vs1, vl);
 }
 
-vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
-  return __riscv_vmorn(op1, op2, vl);
+vbool64_t test_vmorn_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vmorn(vs2, vs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmorn\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vmsbc.c b/auto-generated/gnu-overloaded-tests/vmsbc.c
index 3e6bca020..c20fe6d8c 100644
--- a/auto-generated/gnu-overloaded-tests/vmsbc.c
+++ b/auto-generated/gnu-overloaded-tests/vmsbc.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc(vs2, vs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vmsbc(op1, op2, borrowin, vl);
+vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmsbc(vs2, rs1, v0, vl);
 }
 
-vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsbc(op1, op2, vl);
+vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsbc(vs2, vs1, vl);
 }
 
-vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsbc(op1, op2, vl);
+vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsbc(vs2, rs1, vl);
 }
 
-vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc(vs2, vs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vmsbc(op1, op2, borrowin, vl);
+vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmsbc(vs2, rs1, v0, vl);
 }
 
-vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsbc(op1, op2, vl);
+vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsbc(vs2, vs1, vl);
 }
 
-vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsbc(op1, op2, vl);
+vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsbc(vs2, rs1, vl);
 }
 
-vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc(vs2, vs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vmsbc(op1, op2, borrowin, vl);
+vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmsbc(vs2, rs1, v0, vl);
 }
 
-vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsbc(op1, op2, vl);
+vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsbc(vs2, vs1, vl);
 }
 
-vbool16_t
test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t 
borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return 
__riscv_vmsbc(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, 
vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - 
return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, 
vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, 
vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t 
vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, vbool32_t 
borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { 
+ return __riscv_vmsbc(vs2, vs1, vl); } -vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { - return 
__riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + 
return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsbc(vs2, rs1, vl); } -vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, vs1, v0, vl); } -vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vmsbc(op1, op2, borrowin, vl); +vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmsbc(vs2, rs1, v0, vl); } -vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); +vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsbc(vs2, vs1, vl); } -vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsbc(op1, op2, vl); 
+vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsbc(vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vmsbc\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vmsbf.c b/auto-generated/gnu-overloaded-tests/vmsbf.c
index a1e6f9886..4b8224486 100644
--- a/auto-generated/gnu-overloaded-tests/vmsbf.c
+++ b/auto-generated/gnu-overloaded-tests/vmsbf.c
@@ -6,60 +6,60 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) {
-  return __riscv_vmsbf(op1, vl);
+vbool1_t test_vmsbf_m_b1(vbool1_t vs2, size_t vl) {
+  return __riscv_vmsbf(vs2, vl);
 }
 
-vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) {
-  return __riscv_vmsbf(op1, vl);
+vbool2_t test_vmsbf_m_b2(vbool2_t vs2, size_t vl) {
+  return __riscv_vmsbf(vs2, vl);
 }
 
-vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) {
-  return __riscv_vmsbf(op1, vl);
+vbool4_t test_vmsbf_m_b4(vbool4_t vs2, size_t vl) {
+  return __riscv_vmsbf(vs2, vl);
 }
 
-vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) {
-  return __riscv_vmsbf(op1, vl);
+vbool8_t test_vmsbf_m_b8(vbool8_t vs2, size_t vl) {
+  return __riscv_vmsbf(vs2, vl);
 }
 
-vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) {
-  return __riscv_vmsbf(op1, vl);
+vbool16_t test_vmsbf_m_b16(vbool16_t vs2, size_t vl) {
+  return __riscv_vmsbf(vs2, vl);
 }
 
-vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) {
-  return __riscv_vmsbf(op1, vl);
+vbool32_t test_vmsbf_m_b32(vbool32_t vs2, size_t vl) {
+  return __riscv_vmsbf(vs2, vl);
 }
 
-vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
-  return __riscv_vmsbf(op1, vl);
+vbool64_t test_vmsbf_m_b64(vbool64_t vs2, size_t vl) {
+  return __riscv_vmsbf(vs2, vl);
 }
 
-vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
-  return __riscv_vmsbf(mask, op1, vl);
+vbool1_t test_vmsbf_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) {
+  return __riscv_vmsbf(vm, vs2, vl);
 }
 
-vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
-  return __riscv_vmsbf(mask, op1, vl);
+vbool2_t test_vmsbf_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) {
+  return __riscv_vmsbf(vm, vs2, vl);
 }
 
-vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
-  return __riscv_vmsbf(mask, op1, vl);
+vbool4_t test_vmsbf_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) {
+  return __riscv_vmsbf(vm, vs2, vl);
 }
 
-vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
-  return __riscv_vmsbf(mask, op1, vl);
+vbool8_t test_vmsbf_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) {
+  return __riscv_vmsbf(vm, vs2, vl);
 }
 
-vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
-  return __riscv_vmsbf(mask, op1, vl);
+vbool16_t test_vmsbf_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) {
+  return __riscv_vmsbf(vm, vs2, vl);
 }
 
-vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
-  return __riscv_vmsbf(mask, op1, vl);
+vbool32_t test_vmsbf_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) {
+  return __riscv_vmsbf(vm, vs2, vl);
 }
 
-vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
-  return __riscv_vmsbf(mask, op1, vl);
+vbool64_t test_vmsbf_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) {
+  return __riscv_vmsbf(vm, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsbf\.[ivxfswum.]+\s+} 14 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vmseq.c b/auto-generated/gnu-overloaded-tests/vmseq.c
index 18772c860..0df0a048c 100644
--- a/auto-generated/gnu-overloaded-tests/vmseq.c
+++ b/auto-generated/gnu-overloaded-tests/vmseq.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmseq(vs2, vs1, vl);
 }
 
-vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq(vs2, rs1, vl);
 }
 
-vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmseq(vs2, vs1, vl);
 }
 
-vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq(vs2, rs1, vl);
 }
 
-vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmseq(vs2, vs1, vl);
 }
 
-vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq(vs2, rs1, vl);
 }
 
-vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmseq(vs2, vs1, vl);
 }
 
-vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq(vs2, rs1, vl);
 }
 
-vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmseq(vs2, vs1, vl);
 }
 
-vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq(vs2, rs1, vl);
 }
 
-vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmseq(vs2, vs1, vl);
 }
 
-vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq(vs2, rs1, vl);
 }
 
-vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmseq(vs2, vs1, vl);
 }
 
-vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2, vl);
+vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmseq(vs2, rs1, vl);
 }
 
-vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmseq(op1, op2,
vl); +vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t 
test_vmseq_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t vs2, 
vuint8mf8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vmseq(vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t vs2, 
vuint32m2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmseq(vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(op1, op2, vl); +vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq(mask, 
op1, op2, vl); +vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t 
test_vmseq_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t 
vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t 
test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) 
{ + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t 
test_vmseq_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, 
vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmseq(vm, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmseq\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsge.c b/auto-generated/gnu-overloaded-tests/vmsge.c index df6dade3a..90719beb1 100644 --- a/auto-generated/gnu-overloaded-tests/vmsge.c +++ b/auto-generated/gnu-overloaded-tests/vmsge.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t 
test_vmsge_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsge(vs2, 
vs1, vl); } -vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool8_t 
test_vmsge_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsge(vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge(op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, 
vint8mf4_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, 
size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t 
test_vmsge_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - 
return __riscv_vmsge(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsge(vm, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsgeu.c b/auto-generated/gnu-overloaded-tests/vmsgeu.c index 14eb301dc..720840467 100644 --- a/auto-generated/gnu-overloaded-tests/vmsgeu.c +++ b/auto-generated/gnu-overloaded-tests/vmsgeu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool32_t test_vmsgeu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool32_t test_vmsgeu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool4_t test_vmsgeu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool4_t test_vmsgeu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool2_t 
test_vmsgeu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool2_t test_vmsgeu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool2_t test_vmsgeu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool1_t test_vmsgeu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool1_t test_vmsgeu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool32_t test_vmsgeu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool32_t test_vmsgeu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool4_t test_vmsgeu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool4_t test_vmsgeu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool2_t test_vmsgeu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) 
{ + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool2_t test_vmsgeu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool32_t test_vmsgeu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool32_t test_vmsgeu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool4_t test_vmsgeu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool4_t test_vmsgeu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool64_t test_vmsgeu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool32_t test_vmsgeu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); 
+vbool32_t test_vmsgeu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool16_t test_vmsgeu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgeu(vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(op1, op2, vl); +vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, 
vuint8m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, 
size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t 
rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsgt.c b/auto-generated/gnu-overloaded-tests/vmsgt.c index 26f79e882..208d45613 100644 --- a/auto-generated/gnu-overloaded-tests/vmsgt.c +++ b/auto-generated/gnu-overloaded-tests/vmsgt.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, 
vl); } -vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool16_t test_vmsgt_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool4_t test_vmsgt_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool2_t test_vmsgt_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool1_t test_vmsgt_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return 
__riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool16_t test_vmsgt_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool4_t test_vmsgt_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool2_t test_vmsgt_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool16_t test_vmsgt_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); 
+vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool4_t test_vmsgt_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool16_t test_vmsgt_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsgt(vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(op1, op2, vl); +vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, 
size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - 
return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool32_t 
test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + 
return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsgt\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsgtu.c b/auto-generated/gnu-overloaded-tests/vmsgtu.c index ddf1a4fbb..0be40ff4f 100644 --- a/auto-generated/gnu-overloaded-tests/vmsgtu.c +++ b/auto-generated/gnu-overloaded-tests/vmsgtu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool4_t test_vmsgtu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool4_t 
test_vmsgtu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool2_t test_vmsgtu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool1_t test_vmsgtu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool4_t test_vmsgtu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - 
return __riscv_vmsgtu(op1, op2, vl); +vbool2_t test_vmsgtu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool4_t test_vmsgtu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } 
-vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(op1, op2, vl); +vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, 
size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + 
return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) 
{ - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsgtu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsif.c b/auto-generated/gnu-overloaded-tests/vmsif.c index 9c73e9b66..3c0975d74 100644 --- a/auto-generated/gnu-overloaded-tests/vmsif.c +++ b/auto-generated/gnu-overloaded-tests/vmsif.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmsif(op1, vl); +vbool1_t 
test_vmsif_m_b1(vbool1_t vs2, size_t vl) { + return __riscv_vmsif(vs2, vl); } -vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmsif(op1, vl); +vbool2_t test_vmsif_m_b2(vbool2_t vs2, size_t vl) { + return __riscv_vmsif(vs2, vl); } -vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vmsif(op1, vl); +vbool4_t test_vmsif_m_b4(vbool4_t vs2, size_t vl) { + return __riscv_vmsif(vs2, vl); } -vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmsif(op1, vl); +vbool8_t test_vmsif_m_b8(vbool8_t vs2, size_t vl) { + return __riscv_vmsif(vs2, vl); } -vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmsif(op1, vl); +vbool16_t test_vmsif_m_b16(vbool16_t vs2, size_t vl) { + return __riscv_vmsif(vs2, vl); } -vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmsif(op1, vl); +vbool32_t test_vmsif_m_b32(vbool32_t vs2, size_t vl) { + return __riscv_vmsif(vs2, vl); } -vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmsif(op1, vl); +vbool64_t test_vmsif_m_b64(vbool64_t vs2, size_t vl) { + return __riscv_vmsif(vs2, vl); } -vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vmsif(mask, op1, vl); +vbool1_t test_vmsif_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { + return __riscv_vmsif(vm, vs2, vl); } -vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vmsif(mask, op1, vl); +vbool2_t test_vmsif_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { + return __riscv_vmsif(vm, vs2, vl); } -vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vmsif(mask, op1, vl); +vbool4_t test_vmsif_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_vmsif(vm, vs2, vl); } -vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_vmsif(mask, op1, vl); +vbool8_t test_vmsif_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_vmsif(vm, vs2, vl); } -vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_vmsif(mask, op1, vl); +vbool16_t test_vmsif_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_vmsif(vm, vs2, vl); } -vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_vmsif(mask, op1, vl); +vbool32_t test_vmsif_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_vmsif(vm, vs2, vl); } -vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_vmsif(mask, op1, vl); +vbool64_t test_vmsif_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_vmsif(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsif\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsle.c b/auto-generated/gnu-overloaded-tests/vmsle.c index 9b1335f9c..8ac527da8 100644 --- a/auto-generated/gnu-overloaded-tests/vmsle.c +++ b/auto-generated/gnu-overloaded-tests/vmsle.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t 
test_vmsle_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsle(vs2, 
vs1, vl); } -vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool8_t 
test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsle(vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle(op1, op2, vl); +vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, 
vint8mf4_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, 
size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t 
test_vmsle_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - 
return __riscv_vmsle(mask, op1, op2, vl); +vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsle(vm, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsle(mask, op1, op2, vl); +vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsle(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsle\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsleu.c b/auto-generated/gnu-overloaded-tests/vmsleu.c index 6f80f8fb2..2b703506a 100644 --- a/auto-generated/gnu-overloaded-tests/vmsleu.c +++ b/auto-generated/gnu-overloaded-tests/vmsleu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool2_t 
test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) 
{ + return __riscv_vmsleu(vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); 
+vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsleu(vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsleu(op1, op2, vl); +vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsleu(vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, 
vuint8m2_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, 
size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsleu(vm, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsleu(mask, op1, op2, vl); +vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t 
rs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu(mask, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vmsleu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vmslt.c b/auto-generated/gnu-overloaded-tests/vmslt.c
index 3820a4f72..bda54aae3 100644
--- a/auto-generated/gnu-overloaded-tests/vmslt.c
+++ b/auto-generated/gnu-overloaded-tests/vmslt.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(op1, op2, vl);
+vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vs2, rs1, vl);
 }
-vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt(mask, op1, op2, vl);
+vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vmsltu.c b/auto-generated/gnu-overloaded-tests/vmsltu.c
index 78600d1d7..e40e55c6f 100644
--- a/auto-generated/gnu-overloaded-tests/vmsltu.c
+++ b/auto-generated/gnu-overloaded-tests/vmsltu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(op1, op2, vl);
+vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu(vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu(mask, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu(vm, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vmsne.c b/auto-generated/gnu-overloaded-tests/vmsne.c
index ea369c93c..3237b540e 100644
--- a/auto-generated/gnu-overloaded-tests/vmsne.c
+++ b/auto-generated/gnu-overloaded-tests/vmsne.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsne(op1, op2, vl);
+vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsne(vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, rs1, vl);
 }
-vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, rs1, vl);
 }
-vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, rs1, vl);
 }
-vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, rs1, vl);
 }
-vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, rs1, vl);
 }
-vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, rs1, vl);
 }
-vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, rs1, vl);
 }
-vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsne(vm, vs2, vs1, vl);
 }
-vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne(mask, op1, op2, vl);
+vbool64_t
test_vmsne_vx_i16mf4_b64_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t 
vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool16_t 
test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vmsne(vm, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t 
test_vmsne_vv_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, 
vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsne(vm, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsne\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmsof.c b/auto-generated/gnu-overloaded-tests/vmsof.c index baa1b5339..ea1f0fd04 100644 --- a/auto-generated/gnu-overloaded-tests/vmsof.c +++ b/auto-generated/gnu-overloaded-tests/vmsof.c @@ -6,60 +6,60 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { - return __riscv_vmsof(op1, vl); +vbool1_t test_vmsof_m_b1(vbool1_t vs2, size_t vl) { + return __riscv_vmsof(vs2, vl); } -vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { - return __riscv_vmsof(op1, vl); +vbool2_t test_vmsof_m_b2(vbool2_t vs2, size_t vl) { + return __riscv_vmsof(vs2, vl); } -vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { - return __riscv_vmsof(op1, vl); +vbool4_t test_vmsof_m_b4(vbool4_t vs2, size_t vl) { + return __riscv_vmsof(vs2, vl); } -vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { - return __riscv_vmsof(op1, vl); +vbool8_t test_vmsof_m_b8(vbool8_t vs2, size_t vl) { + return 
__riscv_vmsof(vs2, vl); } -vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { - return __riscv_vmsof(op1, vl); +vbool16_t test_vmsof_m_b16(vbool16_t vs2, size_t vl) { + return __riscv_vmsof(vs2, vl); } -vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { - return __riscv_vmsof(op1, vl); +vbool32_t test_vmsof_m_b32(vbool32_t vs2, size_t vl) { + return __riscv_vmsof(vs2, vl); } -vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { - return __riscv_vmsof(op1, vl); +vbool64_t test_vmsof_m_b64(vbool64_t vs2, size_t vl) { + return __riscv_vmsof(vs2, vl); } -vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vmsof(mask, op1, vl); +vbool1_t test_vmsof_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { + return __riscv_vmsof(vm, vs2, vl); } -vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vmsof(mask, op1, vl); +vbool2_t test_vmsof_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { + return __riscv_vmsof(vm, vs2, vl); } -vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vmsof(mask, op1, vl); +vbool4_t test_vmsof_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { + return __riscv_vmsof(vm, vs2, vl); } -vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_vmsof(mask, op1, vl); +vbool8_t test_vmsof_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { + return __riscv_vmsof(vm, vs2, vl); } -vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_vmsof(mask, op1, vl); +vbool16_t test_vmsof_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { + return __riscv_vmsof(vm, vs2, vl); } -vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_vmsof(mask, op1, vl); +vbool32_t test_vmsof_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { + return __riscv_vmsof(vm, vs2, vl); } -vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_vmsof(mask, op1, vl); +vbool64_t test_vmsof_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { + return __riscv_vmsof(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsof\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmul.c b/auto-generated/gnu-overloaded-tests/vmul.c index 44ec433f1..dced6ceb6 100644 --- a/auto-generated/gnu-overloaded-tests/vmul.c +++ b/auto-generated/gnu-overloaded-tests/vmul.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) 
{ - return __riscv_vmul(op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, 
size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } 
-vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m1_t 
test_vmul_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } 
-vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { 
- return __riscv_vmul(op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul(vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, 
op2, vl); +vint8m2_t test_vmul_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, 
size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); 
} -vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t 
mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t 
op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, 
vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul(vm, vs2, vs1, vl); } -vuint64m8_t 
test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul(mask, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmul\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmulh.c b/auto-generated/gnu-overloaded-tests/vmulh.c index ba871eee9..29992b31c 100644 --- a/auto-generated/gnu-overloaded-tests/vmulh.c +++ b/auto-generated/gnu-overloaded-tests/vmulh.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return 
__riscv_vmulh(vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, 
vint32m1_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh(vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8(vint64m8_t vs2, 
int64_t rs1, size_t vl) { + return __riscv_vmulh(vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint8m8_t 
test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint32mf2_t 
test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } 
-vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh(vm, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh(mask, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulh\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmulhsu.c b/auto-generated/gnu-overloaded-tests/vmulhsu.c index b17935b9d..097217fd8 100644 --- a/auto-generated/gnu-overloaded-tests/vmulhsu.c +++ b/auto-generated/gnu-overloaded-tests/vmulhsu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint8m1_t 
test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint16m4_t 
test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + 
return __riscv_vmulhsu(vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vs2, rs1, vl); } -vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m1_t 
test_vmulhsu_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, 
uint16_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, uint32_t rs1, 
size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu(mask, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhsu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmulhu.c b/auto-generated/gnu-overloaded-tests/vmulhu.c index 4c3e23a6d..9e823657d 100644 --- a/auto-generated/gnu-overloaded-tests/vmulhu.c +++ b/auto-generated/gnu-overloaded-tests/vmulhu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, 
uint8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vmulhu(vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t vs2, 
vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu(vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vs2, rs1, vl); } -vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t vm, 
vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, 
vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t 
vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu(mask, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmv.c b/auto-generated/gnu-overloaded-tests/vmv.c index 651238917..bab8eeafe 100644 --- a/auto-generated/gnu-overloaded-tests/vmv.c +++ b/auto-generated/gnu-overloaded-tests/vmv.c @@ -6,416 +6,416 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint8m1_t test_vmv_v_v_i8m1(vint8m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint8m2_t test_vmv_v_v_i8m2(vint8m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint8m4_t 
test_vmv_v_v_i8m4(vint8m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint8m8_t test_vmv_v_v_i8m8(vint8m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint16m1_t test_vmv_v_v_i16m1(vint16m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint16m2_t test_vmv_v_v_i16m2(vint16m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint16m4_t test_vmv_v_v_i16m4(vint16m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint16m8_t test_vmv_v_v_i16m8(vint16m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint32m1_t test_vmv_v_v_i32m1(vint32m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint32m2_t test_vmv_v_v_i32m2(vint32m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint32m4_t test_vmv_v_v_i32m4(vint32m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint32m8_t test_vmv_v_v_i32m8(vint32m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint64m1_t test_vmv_v_v_i64m1(vint64m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint64m2_t test_vmv_v_v_i64m2(vint64m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint64m4_t test_vmv_v_v_i64m4(vint64m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vint64m8_t test_vmv_v_v_i64m8(vint64m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) { - 
return __riscv_vmv_v(src, vl); +vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t vs1, size_t vl) { + return 
__riscv_vmv_v(vs1, vl); } -vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vmv_v(src, vl); +vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); } -int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { - return __riscv_vmv_x(src); +int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t vs1) { + return __riscv_vmv_x(vs1); } -int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { - return __riscv_vmv_x(src); +int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t vs1) { + return __riscv_vmv_x(vs1); } -int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { - return 
__riscv_vmv_x(src); +int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t vs1) { + return __riscv_vmv_x(vs1); } -int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { - return __riscv_vmv_x(src); +int8_t test_vmv_x_s_i8m1_i8(vint8m1_t vs1) { + return __riscv_vmv_x(vs1); } -int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { - return __riscv_vmv_x(src); +int8_t test_vmv_x_s_i8m2_i8(vint8m2_t vs1) { + return __riscv_vmv_x(vs1); } -int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { - return __riscv_vmv_x(src); +int8_t test_vmv_x_s_i8m4_i8(vint8m4_t vs1) { + return __riscv_vmv_x(vs1); } -int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { - return __riscv_vmv_x(src); +int8_t test_vmv_x_s_i8m8_i8(vint8m8_t vs1) { + return __riscv_vmv_x(vs1); } -int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { - return __riscv_vmv_x(src); +int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t vs1) { + return __riscv_vmv_x(vs1); } -int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { - return __riscv_vmv_x(src); +int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t vs1) { + return __riscv_vmv_x(vs1); } -int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) { - return __riscv_vmv_x(src); +int16_t test_vmv_x_s_i16m1_i16(vint16m1_t vs1) { + return __riscv_vmv_x(vs1); } -int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { - return __riscv_vmv_x(src); +int16_t test_vmv_x_s_i16m2_i16(vint16m2_t vs1) { + return __riscv_vmv_x(vs1); } -int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { - return __riscv_vmv_x(src); +int16_t test_vmv_x_s_i16m4_i16(vint16m4_t vs1) { + return __riscv_vmv_x(vs1); } -int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { - return __riscv_vmv_x(src); +int16_t test_vmv_x_s_i16m8_i16(vint16m8_t vs1) { + return __riscv_vmv_x(vs1); } -int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { - return __riscv_vmv_x(src); +int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t vs1) { + return __riscv_vmv_x(vs1); } -int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { - return __riscv_vmv_x(src); +int32_t test_vmv_x_s_i32m1_i32(vint32m1_t vs1) { + return __riscv_vmv_x(vs1); } -int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { - return __riscv_vmv_x(src); +int32_t test_vmv_x_s_i32m2_i32(vint32m2_t vs1) { + return __riscv_vmv_x(vs1); } -int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { - return __riscv_vmv_x(src); +int32_t test_vmv_x_s_i32m4_i32(vint32m4_t vs1) { + return __riscv_vmv_x(vs1); } -int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { - return __riscv_vmv_x(src); +int32_t test_vmv_x_s_i32m8_i32(vint32m8_t vs1) { + return __riscv_vmv_x(vs1); } -int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { - return __riscv_vmv_x(src); +int64_t test_vmv_x_s_i64m1_i64(vint64m1_t vs1) { + return __riscv_vmv_x(vs1); } -int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { - return __riscv_vmv_x(src); +int64_t test_vmv_x_s_i64m2_i64(vint64m2_t vs1) { + return __riscv_vmv_x(vs1); } -int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { - return __riscv_vmv_x(src); +int64_t test_vmv_x_s_i64m4_i64(vint64m4_t vs1) { + return __riscv_vmv_x(vs1); } -int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { - return __riscv_vmv_x(src); +int64_t test_vmv_x_s_i64m8_i64(vint64m8_t vs1) { + return __riscv_vmv_x(vs1); } -uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { - return __riscv_vmv_x(src); +uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t vs1) { + return __riscv_vmv_x(vs1); } -uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { - return __riscv_vmv_x(src); +uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t vs1) { + return __riscv_vmv_x(vs1); } -uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { - return __riscv_vmv_x(src); +uint8_t 
test_vmv_x_s_u8mf2_u8(vuint8mf2_t vs1) { + return __riscv_vmv_x(vs1); } -uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { - return __riscv_vmv_x(src); +uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t vs1) { + return __riscv_vmv_x(vs1); } -uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { - return __riscv_vmv_x(src); +uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t vs1) { + return __riscv_vmv_x(vs1); } -uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { - return __riscv_vmv_x(src); +uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t vs1) { + return __riscv_vmv_x(vs1); } -uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { - return __riscv_vmv_x(src); +uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t vs1) { + return __riscv_vmv_x(vs1); } -uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { - return __riscv_vmv_x(src); +uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t vs1) { + return __riscv_vmv_x(vs1); } -uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { - return __riscv_vmv_x(src); +uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t vs1) { + return __riscv_vmv_x(vs1); } -uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { - return __riscv_vmv_x(src); +uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t vs1) { + return __riscv_vmv_x(vs1); } -uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { - return __riscv_vmv_x(src); +uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t vs1) { + return __riscv_vmv_x(vs1); } -uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { - return __riscv_vmv_x(src); +uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t vs1) { + return __riscv_vmv_x(vs1); } -uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { - return __riscv_vmv_x(src); +uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t vs1) { + return __riscv_vmv_x(vs1); } -uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { - return __riscv_vmv_x(src); +uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t vs1) { + return __riscv_vmv_x(vs1); } -uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { - return __riscv_vmv_x(src); +uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t vs1) { + return __riscv_vmv_x(vs1); } -uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { - return __riscv_vmv_x(src); +uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t vs1) { + return __riscv_vmv_x(vs1); } -uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { - return __riscv_vmv_x(src); +uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t vs1) { + return __riscv_vmv_x(vs1); } -uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { - return __riscv_vmv_x(src); +uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t vs1) { + return __riscv_vmv_x(vs1); } -uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) { - return __riscv_vmv_x(src); +uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t vs1) { + return __riscv_vmv_x(vs1); } -uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { - return __riscv_vmv_x(src); +uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t vs1) { + return __riscv_vmv_x(vs1); } -uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { - return __riscv_vmv_x(src); +uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t vs1) { + return __riscv_vmv_x(vs1); } -uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { - return __riscv_vmv_x(src); +uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t vs1) { + return __riscv_vmv_x(vs1); } /* { dg-final { scan-assembler-times {v[ml][s]*[ve][0-9]*\.[ivxfswum.]+\s+} 130 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmxnor.c b/auto-generated/gnu-overloaded-tests/vmxnor.c index bfb9668c5..d8ea75c2c 100644 --- a/auto-generated/gnu-overloaded-tests/vmxnor.c +++ b/auto-generated/gnu-overloaded-tests/vmxnor.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef 
float float32_t; typedef double float64_t; -vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmxnor(op1, op2, vl); +vbool1_t test_vmxnor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmxnor(vs2, vs1, vl); } -vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmxnor(op1, op2, vl); +vbool2_t test_vmxnor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmxnor(vs2, vs1, vl); } -vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmxnor(op1, op2, vl); +vbool4_t test_vmxnor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmxnor(vs2, vs1, vl); } -vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmxnor(op1, op2, vl); +vbool8_t test_vmxnor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmxnor(vs2, vs1, vl); } -vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmxnor(op1, op2, vl); +vbool16_t test_vmxnor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmxnor(vs2, vs1, vl); } -vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmxnor(op1, op2, vl); +vbool32_t test_vmxnor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmxnor(vs2, vs1, vl); } -vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmxnor(op1, op2, vl); +vbool64_t test_vmxnor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vmxnor(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmxnor\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vmxor.c b/auto-generated/gnu-overloaded-tests/vmxor.c index 42b5e8b96..ac7cb0c4d 100644 --- a/auto-generated/gnu-overloaded-tests/vmxor.c +++ b/auto-generated/gnu-overloaded-tests/vmxor.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return __riscv_vmxor(op1, op2, vl); +vbool1_t test_vmxor_mm_b1(vbool1_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vmxor(vs2, vs1, vl); } -vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return __riscv_vmxor(op1, op2, vl); +vbool2_t test_vmxor_mm_b2(vbool2_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vmxor(vs2, vs1, vl); } -vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return __riscv_vmxor(op1, op2, vl); +vbool4_t test_vmxor_mm_b4(vbool4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vmxor(vs2, vs1, vl); } -vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return __riscv_vmxor(op1, op2, vl); +vbool8_t test_vmxor_mm_b8(vbool8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vmxor(vs2, vs1, vl); } -vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return __riscv_vmxor(op1, op2, vl); +vbool16_t test_vmxor_mm_b16(vbool16_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vmxor(vs2, vs1, vl); } -vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return __riscv_vmxor(op1, op2, vl); +vbool32_t test_vmxor_mm_b32(vbool32_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vmxor(vs2, vs1, vl); } -vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return __riscv_vmxor(op1, op2, vl); +vbool64_t test_vmxor_mm_b64(vbool64_t vs2, vbool64_t vs1, size_t 
vl) { + return __riscv_vmxor(vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmxor\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vnclip.c b/auto-generated/gnu-overloaded-tests/vnclip.c index 8454515bf..17815d3c8 100644 --- a/auto-generated/gnu-overloaded-tests/vnclip.c +++ b/auto-generated/gnu-overloaded-tests/vnclip.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1(vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2(vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wx_i8m2(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wv_i8m4(vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t 
test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wv_i16m1(vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wv_i16m2(vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4(vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip(vs2, rs1, 
__RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wv_i32m1(vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wx_i32m1(vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wv_i32m2(vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wx_i32m2(vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wv_i32m4(vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclip(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wx_i32m4(vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t vm, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t vm, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t vm, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t vm, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t vm, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t vm, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t vm, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t vm, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t vm, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t vm, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t vm, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t vm, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclip\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vnclipu.c b/auto-generated/gnu-overloaded-tests/vnclipu.c
index d6ce14226..39d611c71 100644
--- a/auto-generated/gnu-overloaded-tests/vnclipu.c
+++ b/auto-generated/gnu-overloaded-tests/vnclipu.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu(mask, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclipu\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vncvt.c b/auto-generated/gnu-overloaded-tests/vncvt.c
index f75b9d572..f54fe5b88 100644
--- a/auto-generated/gnu-overloaded-tests/vncvt.c
+++ b/auto-generated/gnu-overloaded-tests/vncvt.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint8mf8_t test_vncvt_x_x_w_i8mf8(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint8mf4_t test_vncvt_x_x_w_i8mf4(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint8mf2_t test_vncvt_x_x_w_i8mf2(vint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint8m1_t test_vncvt_x_x_w_i8m1(vint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint8m2_t test_vncvt_x_x_w_i8m2(vint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint8m4_t test_vncvt_x_x_w_i8m4(vint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint8mf8_t test_vncvt_x_x_w_u8mf8(vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint8mf4_t test_vncvt_x_x_w_u8mf4(vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint8mf2_t test_vncvt_x_x_w_u8mf2(vuint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint8m1_t test_vncvt_x_x_w_u8m1(vuint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint8m2_t test_vncvt_x_x_w_u8m2(vuint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint8m4_t test_vncvt_x_x_w_u8m4(vuint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint16mf4_t test_vncvt_x_x_w_i16mf4(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint16mf2_t test_vncvt_x_x_w_i16mf2(vint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint16m1_t test_vncvt_x_x_w_i16m1(vint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint16m2_t test_vncvt_x_x_w_i16m2(vint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint16m4_t test_vncvt_x_x_w_i16m4(vint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint16mf4_t test_vncvt_x_x_w_u16mf4(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint16mf2_t test_vncvt_x_x_w_u16mf2(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint16m1_t test_vncvt_x_x_w_u16m1(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint16m2_t test_vncvt_x_x_w_u16m2(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint16m4_t test_vncvt_x_x_w_u16m4(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint32mf2_t test_vncvt_x_x_w_i32mf2(vint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint32m1_t test_vncvt_x_x_w_i32m1(vint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint32m2_t test_vncvt_x_x_w_i32m2(vint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vint32m4_t test_vncvt_x_x_w_i32m4(vint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint32mf2_t test_vncvt_x_x_w_u32mf2(vuint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint32m1_t test_vncvt_x_x_w_u32m1(vuint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint32m2_t test_vncvt_x_x_w_u32m2(vuint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x(src, vl);
+vuint32m4_t test_vncvt_x_x_w_u32m4(vuint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vs2, vl);
 }

-vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t mask, vint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint8mf8_t test_vncvt_x_x_w_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t mask, vint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint8mf4_t test_vncvt_x_x_w_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t mask, vint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint8mf2_t test_vncvt_x_x_w_i8mf2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t mask, vint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint8m1_t test_vncvt_x_x_w_i8m1_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t mask, vint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint8m2_t test_vncvt_x_x_w_i8m2_m(vbool4_t vm, vint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t mask, vint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint8m4_t test_vncvt_x_x_w_i8m4_m(vbool2_t vm, vint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t mask, vuint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint8mf8_t test_vncvt_x_x_w_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t mask, vuint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint8mf4_t test_vncvt_x_x_w_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t mask, vuint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint8mf2_t test_vncvt_x_x_w_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t mask, vuint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint8m1_t test_vncvt_x_x_w_u8m1_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t mask, vuint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint8m2_t test_vncvt_x_x_w_u8m2_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t mask, vuint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint8m4_t test_vncvt_x_x_w_u8m4_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint16mf4_t test_vncvt_x_x_w_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint16mf2_t test_vncvt_x_x_w_i16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint16m1_t test_vncvt_x_x_w_i16m1_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint16m2_t test_vncvt_x_x_w_i16m2_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint16m4_t test_vncvt_x_x_w_i16m4_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint16mf2_t test_vncvt_x_x_w_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint16m1_t test_vncvt_x_x_w_u16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint16m2_t test_vncvt_x_x_w_u16m2_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint16m4_t test_vncvt_x_x_w_u16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint32mf2_t test_vncvt_x_x_w_i32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint32m1_t test_vncvt_x_x_w_i32m1_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint32m2_t test_vncvt_x_x_w_i32m2_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vint32m4_t test_vncvt_x_x_w_i32m4_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint32mf2_t test_vncvt_x_x_w_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint32m1_t test_vncvt_x_x_w_u32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint32m2_t test_vncvt_x_x_w_u32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

-vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x(mask, src, vl);
+vuint32m4_t test_vncvt_x_x_w_u32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x(vm, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vncvt\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vneg.c b/auto-generated/gnu-overloaded-tests/vneg.c
index eb4cb7181..336dcea1e 100644
--- a/auto-generated/gnu-overloaded-tests/vneg.c
+++ b/auto-generated/gnu-overloaded-tests/vneg.c
@@ -6,180 +6,180 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint8mf8_t test_vneg_v_i8mf8(vint8mf8_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint8mf4_t test_vneg_v_i8mf4(vint8mf4_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint8mf2_t test_vneg_v_i8mf2(vint8mf2_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint8m1_t test_vneg_v_i8m1(vint8m1_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint8m1_t test_vneg_v_i8m1(vint8m1_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint8m2_t test_vneg_v_i8m2(vint8m2_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint8m2_t test_vneg_v_i8m2(vint8m2_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint8m4_t test_vneg_v_i8m4(vint8m4_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint8m4_t test_vneg_v_i8m4(vint8m4_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint8m8_t test_vneg_v_i8m8(vint8m8_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint8m8_t test_vneg_v_i8m8(vint8m8_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint16mf4_t test_vneg_v_i16mf4(vint16mf4_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint16mf2_t test_vneg_v_i16mf2(vint16mf2_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint16m1_t test_vneg_v_i16m1(vint16m1_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint16m1_t test_vneg_v_i16m1(vint16m1_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint16m2_t test_vneg_v_i16m2(vint16m2_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint16m2_t test_vneg_v_i16m2(vint16m2_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint16m4_t test_vneg_v_i16m4(vint16m4_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint16m4_t test_vneg_v_i16m4(vint16m4_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint16m8_t test_vneg_v_i16m8(vint16m8_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint16m8_t test_vneg_v_i16m8(vint16m8_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint32mf2_t test_vneg_v_i32mf2(vint32mf2_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint32m1_t test_vneg_v_i32m1(vint32m1_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint32m1_t test_vneg_v_i32m1(vint32m1_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint32m2_t test_vneg_v_i32m2(vint32m2_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint32m2_t test_vneg_v_i32m2(vint32m2_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint32m4_t test_vneg_v_i32m4(vint32m4_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint32m4_t test_vneg_v_i32m4(vint32m4_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint32m8_t test_vneg_v_i32m8(vint32m8_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint32m8_t test_vneg_v_i32m8(vint32m8_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint64m1_t test_vneg_v_i64m1(vint64m1_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint64m1_t test_vneg_v_i64m1(vint64m1_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint64m2_t test_vneg_v_i64m2(vint64m2_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint64m2_t test_vneg_v_i64m2(vint64m2_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint64m4_t test_vneg_v_i64m4(vint64m4_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint64m4_t test_vneg_v_i64m4(vint64m4_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint64m8_t test_vneg_v_i64m8(vint64m8_t op1, size_t vl) {
-  return __riscv_vneg(op1, vl);
+vint64m8_t test_vneg_v_i64m8(vint64m8_t vs, size_t vl) {
+  return __riscv_vneg(vs, vl);
 }

-vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint8mf8_t test_vneg_v_i8mf8_m(vbool64_t vm, vint8mf8_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint8mf4_t test_vneg_v_i8mf4_m(vbool32_t vm, vint8mf4_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint8mf2_t test_vneg_v_i8mf2_m(vbool16_t vm, vint8mf2_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint8m1_t test_vneg_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint8m1_t test_vneg_v_i8m1_m(vbool8_t vm, vint8m1_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint8m2_t test_vneg_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint8m2_t test_vneg_v_i8m2_m(vbool4_t vm, vint8m2_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint8m4_t test_vneg_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint8m4_t test_vneg_v_i8m4_m(vbool2_t vm, vint8m4_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint8m8_t test_vneg_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint8m8_t test_vneg_v_i8m8_m(vbool1_t vm, vint8m8_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint16mf4_t test_vneg_v_i16mf4_m(vbool64_t vm, vint16mf4_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint16mf2_t test_vneg_v_i16mf2_m(vbool32_t vm, vint16mf2_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint16m1_t test_vneg_v_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint16m1_t test_vneg_v_i16m1_m(vbool16_t vm, vint16m1_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint16m2_t test_vneg_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint16m2_t test_vneg_v_i16m2_m(vbool8_t vm, vint16m2_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint16m4_t test_vneg_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint16m4_t test_vneg_v_i16m4_m(vbool4_t vm, vint16m4_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint16m8_t test_vneg_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint16m8_t test_vneg_v_i16m8_m(vbool2_t vm, vint16m8_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint32mf2_t test_vneg_v_i32mf2_m(vbool64_t vm, vint32mf2_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint32m1_t test_vneg_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint32m1_t test_vneg_v_i32m1_m(vbool32_t vm, vint32m1_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint32m2_t test_vneg_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint32m2_t test_vneg_v_i32m2_m(vbool16_t vm, vint32m2_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint32m4_t test_vneg_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint32m4_t test_vneg_v_i32m4_m(vbool8_t vm, vint32m4_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint32m8_t test_vneg_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint32m8_t test_vneg_v_i32m8_m(vbool4_t vm, vint32m8_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint64m1_t test_vneg_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint64m1_t test_vneg_v_i64m1_m(vbool64_t vm, vint64m1_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint64m2_t test_vneg_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint64m2_t test_vneg_v_i64m2_m(vbool32_t vm, vint64m2_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint64m4_t test_vneg_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint64m4_t test_vneg_v_i64m4_m(vbool16_t vm, vint64m4_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

-vint64m8_t test_vneg_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) {
-  return __riscv_vneg(mask, op1, vl);
+vint64m8_t test_vneg_v_i64m8_m(vbool8_t vm, vint64m8_t vs, size_t vl) {
+  return __riscv_vneg(vm, vs, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vneg\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vnmsac.c b/auto-generated/gnu-overloaded-tests/vnmsac.c
index 45bd251f3..3c722cb84 100644
--- a/auto-generated/gnu-overloaded-tests/vnmsac.c
+++ b/auto-generated/gnu-overloaded-tests/vnmsac.c
@@ -358,356 +358,356 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2,
   return __riscv_vnmsac(vd, rs1, vs2, vl);
 }

-vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, vs1, vs2, vl);
 }

-vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsac(vm, vd, rs1, vs2, vl);
 }

-vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsac(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1,
vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, 
vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, 
vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t 
rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vnmsub.c b/auto-generated/gnu-overloaded-tests/vnmsub.c index 1657ec6f0..3194c3dec 100644 --- a/auto-generated/gnu-overloaded-tests/vnmsub.c +++ b/auto-generated/gnu-overloaded-tests/vnmsub.c @@ -358,356 +358,356 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, return __riscv_vnmsub(vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t vm, vint8m2_t 
vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } 
-vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, 
vint32m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vint64m8_t 
test_vnmsub_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, 
vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, 
vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, 
vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsub(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint64_t 
rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsub(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vnot.c b/auto-generated/gnu-overloaded-tests/vnot.c index 7756b622e..b7391293c 100644 --- a/auto-generated/gnu-overloaded-tests/vnot.c +++ b/auto-generated/gnu-overloaded-tests/vnot.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint8mf8_t test_vnot_v_i8mf8(vint8mf8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint8mf4_t test_vnot_v_i8mf4(vint8mf4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint8mf2_t test_vnot_v_i8mf2(vint8mf2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint8m1_t test_vnot_v_i8m1(vint8m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint8m1_t test_vnot_v_i8m1(vint8m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint8m2_t test_vnot_v_i8m2(vint8m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint8m2_t test_vnot_v_i8m2(vint8m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint8m4_t test_vnot_v_i8m4(vint8m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint8m4_t test_vnot_v_i8m4(vint8m4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint8m8_t test_vnot_v_i8m8(vint8m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint8m8_t test_vnot_v_i8m8(vint8m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint16mf4_t test_vnot_v_i16mf4(vint16mf4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint16mf2_t test_vnot_v_i16mf2(vint16mf2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint16m1_t test_vnot_v_i16m1(vint16m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint16m1_t test_vnot_v_i16m1(vint16m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint16m2_t test_vnot_v_i16m2(vint16m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint16m2_t test_vnot_v_i16m2(vint16m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint16m4_t test_vnot_v_i16m4(vint16m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint16m4_t test_vnot_v_i16m4(vint16m4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint16m8_t test_vnot_v_i16m8(vint16m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint16m8_t test_vnot_v_i16m8(vint16m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint32mf2_t test_vnot_v_i32mf2(vint32mf2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint32m1_t test_vnot_v_i32m1(vint32m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint32m1_t test_vnot_v_i32m1(vint32m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint32m2_t test_vnot_v_i32m2(vint32m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint32m2_t test_vnot_v_i32m2(vint32m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint32m4_t test_vnot_v_i32m4(vint32m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint32m4_t test_vnot_v_i32m4(vint32m4_t vs, size_t vl) { + 
return __riscv_vnot(vs, vl); } -vint32m8_t test_vnot_v_i32m8(vint32m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint32m8_t test_vnot_v_i32m8(vint32m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint64m1_t test_vnot_v_i64m1(vint64m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint64m1_t test_vnot_v_i64m1(vint64m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint64m2_t test_vnot_v_i64m2(vint64m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint64m2_t test_vnot_v_i64m2(vint64m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint64m4_t test_vnot_v_i64m4(vint64m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint64m4_t test_vnot_v_i64m4(vint64m4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint64m8_t test_vnot_v_i64m8(vint64m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vint64m8_t test_vnot_v_i64m8(vint64m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint8mf8_t test_vnot_v_u8mf8(vuint8mf8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint8mf4_t test_vnot_v_u8mf4(vuint8mf4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint8mf2_t test_vnot_v_u8mf2(vuint8mf2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint8m1_t test_vnot_v_u8m1(vuint8m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint8m1_t test_vnot_v_u8m1(vuint8m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint8m2_t test_vnot_v_u8m2(vuint8m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint8m2_t test_vnot_v_u8m2(vuint8m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint8m4_t test_vnot_v_u8m4(vuint8m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint8m4_t test_vnot_v_u8m4(vuint8m4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint8m8_t test_vnot_v_u8m8(vuint8m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint8m8_t test_vnot_v_u8m8(vuint8m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint16mf4_t test_vnot_v_u16mf4(vuint16mf4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint16mf2_t test_vnot_v_u16mf2(vuint16mf2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint16m1_t test_vnot_v_u16m1(vuint16m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint16m1_t test_vnot_v_u16m1(vuint16m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint16m2_t test_vnot_v_u16m2(vuint16m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint16m2_t test_vnot_v_u16m2(vuint16m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint16m4_t test_vnot_v_u16m4(vuint16m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint16m4_t test_vnot_v_u16m4(vuint16m4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint16m8_t test_vnot_v_u16m8(vuint16m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint16m8_t test_vnot_v_u16m8(vuint16m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint32mf2_t test_vnot_v_u32mf2(vuint32mf2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint32m1_t 
test_vnot_v_u32m1(vuint32m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint32m1_t test_vnot_v_u32m1(vuint32m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint32m2_t test_vnot_v_u32m2(vuint32m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint32m2_t test_vnot_v_u32m2(vuint32m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint32m4_t test_vnot_v_u32m4(vuint32m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint32m4_t test_vnot_v_u32m4(vuint32m4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint32m8_t test_vnot_v_u32m8(vuint32m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint32m8_t test_vnot_v_u32m8(vuint32m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint64m1_t test_vnot_v_u64m1(vuint64m1_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint64m1_t test_vnot_v_u64m1(vuint64m1_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint64m2_t test_vnot_v_u64m2(vuint64m2_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint64m2_t test_vnot_v_u64m2(vuint64m2_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint64m4_t test_vnot_v_u64m4(vuint64m4_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint64m4_t test_vnot_v_u64m4(vuint64m4_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vuint64m8_t test_vnot_v_u64m8(vuint64m8_t op1, size_t vl) { - return __riscv_vnot(op1, vl); +vuint64m8_t test_vnot_v_u64m8(vuint64m8_t vs, size_t vl) { + return __riscv_vnot(vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_m(vbool64_t vm, vint8mf8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_m(vbool32_t vm, vint8mf4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_m(vbool16_t vm, vint8mf2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint8m1_t test_vnot_v_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint8m1_t test_vnot_v_i8m1_m(vbool8_t vm, vint8m1_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint8m2_t test_vnot_v_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint8m2_t test_vnot_v_i8m2_m(vbool4_t vm, vint8m2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint8m4_t test_vnot_v_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint8m4_t test_vnot_v_i8m4_m(vbool2_t vm, vint8m4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint8m8_t test_vnot_v_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint8m8_t test_vnot_v_i8m8_m(vbool1_t vm, vint8m8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_m(vbool64_t vm, vint16mf4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_m(vbool32_t vm, vint16mf2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint16m1_t test_vnot_v_i16m1_m(vbool16_t mask, vint16m1_t op1, 
size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint16m1_t test_vnot_v_i16m1_m(vbool16_t vm, vint16m1_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint16m2_t test_vnot_v_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint16m2_t test_vnot_v_i16m2_m(vbool8_t vm, vint16m2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint16m4_t test_vnot_v_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint16m4_t test_vnot_v_i16m4_m(vbool4_t vm, vint16m4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint16m8_t test_vnot_v_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint16m8_t test_vnot_v_i16m8_m(vbool2_t vm, vint16m8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_m(vbool64_t vm, vint32mf2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint32m1_t test_vnot_v_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint32m1_t test_vnot_v_i32m1_m(vbool32_t vm, vint32m1_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint32m2_t test_vnot_v_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint32m2_t test_vnot_v_i32m2_m(vbool16_t vm, vint32m2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint32m4_t test_vnot_v_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint32m4_t test_vnot_v_i32m4_m(vbool8_t vm, vint32m4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint32m8_t test_vnot_v_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint32m8_t test_vnot_v_i32m8_m(vbool4_t vm, vint32m8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint64m1_t test_vnot_v_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint64m1_t test_vnot_v_i64m1_m(vbool64_t vm, vint64m1_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint64m2_t test_vnot_v_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint64m2_t test_vnot_v_i64m2_m(vbool32_t vm, vint64m2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint64m4_t test_vnot_v_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint64m4_t test_vnot_v_i64m4_m(vbool16_t vm, vint64m4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vint64m8_t test_vnot_v_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vint64m8_t test_vnot_v_i64m8_m(vbool8_t vm, vint64m8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, 
vl); } -vuint8m1_t test_vnot_v_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint8m1_t test_vnot_v_u8m1_m(vbool8_t vm, vuint8m1_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint8m2_t test_vnot_v_u8m2_m(vbool4_t vm, vuint8m2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint8m4_t test_vnot_v_u8m4_m(vbool2_t vm, vuint8m4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint8m8_t test_vnot_v_u8m8_m(vbool1_t vm, vuint8m8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint16m1_t test_vnot_v_u16m1_m(vbool16_t vm, vuint16m1_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint16m2_t test_vnot_v_u16m2_m(vbool8_t vm, vuint16m2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint16m4_t test_vnot_v_u16m4_m(vbool4_t vm, vuint16m4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint16m8_t test_vnot_v_u16m8_m(vbool2_t vm, vuint16m8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint32m1_t test_vnot_v_u32m1_m(vbool32_t vm, vuint32m1_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint32m2_t test_vnot_v_u32m2_m(vbool16_t vm, vuint32m2_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint32m4_t test_vnot_v_u32m4_m(vbool8_t vm, vuint32m4_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); +vuint32m8_t test_vnot_v_u32m8_m(vbool4_t vm, vuint32m8_t vs, size_t vl) { + return __riscv_vnot(vm, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t vl) { - return __riscv_vnot(mask, op1, vl); 
+vuint64m1_t test_vnot_v_u64m1_m(vbool64_t vm, vuint64m1_t vs, size_t vl) {
+  return __riscv_vnot(vm, vs, vl);
 }
 
-vuint64m2_t test_vnot_v_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t vl) {
-  return __riscv_vnot(mask, op1, vl);
+vuint64m2_t test_vnot_v_u64m2_m(vbool32_t vm, vuint64m2_t vs, size_t vl) {
+  return __riscv_vnot(vm, vs, vl);
 }
 
-vuint64m4_t test_vnot_v_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t vl) {
-  return __riscv_vnot(mask, op1, vl);
+vuint64m4_t test_vnot_v_u64m4_m(vbool16_t vm, vuint64m4_t vs, size_t vl) {
+  return __riscv_vnot(vm, vs, vl);
 }
 
-vuint64m8_t test_vnot_v_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t vl) {
-  return __riscv_vnot(mask, op1, vl);
+vuint64m8_t test_vnot_v_u64m8_m(vbool8_t vm, vuint64m8_t vs, size_t vl) {
+  return __riscv_vnot(vm, vs, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnot\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vnsra.c b/auto-generated/gnu-overloaded-tests/vnsra.c
index d7fd7f53a..2031b13fb 100644
--- a/auto-generated/gnu-overloaded-tests/vnsra.c
+++ b/auto-generated/gnu-overloaded-tests/vnsra.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8m1_t test_vnsra_wv_i8m1(vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8m1_t test_vnsra_wx_i8m1(vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8m2_t test_vnsra_wv_i8m2(vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8m2_t test_vnsra_wx_i8m2(vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8m4_t test_vnsra_wv_i8m4(vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint8m4_t test_vnsra_wx_i8m4(vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16m1_t test_vnsra_wv_i16m1(vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16m1_t test_vnsra_wx_i16m1(vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16m2_t test_vnsra_wv_i16m2(vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16m2_t test_vnsra_wx_i16m2(vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16m4_t test_vnsra_wv_i16m4(vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint16m4_t test_vnsra_wx_i16m4(vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32m1_t test_vnsra_wv_i32m1(vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32m1_t test_vnsra_wx_i32m1(vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32m2_t test_vnsra_wv_i32m2(vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32m2_t test_vnsra_wx_i32m2(vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32m4_t test_vnsra_wv_i32m4(vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsra(vs2, vs1, vl);
 }
 
-vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(op1, shift, vl);
+vint32m4_t test_vnsra_wx_i32m4(vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vs2, rs1, vl);
 }
 
-vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t vm, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t vm, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t vm, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t vm, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t vm, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t vm, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t vm, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t vm, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t vm, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t vm, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t vm, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
-vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t vm, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, vs1, vl);
 }
 
-vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsra(mask, op1, shift, vl);
+vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsra(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsra\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vnsrl.c b/auto-generated/gnu-overloaded-tests/vnsrl.c
index b2d9fd5df..d4c0363ca 100644
--- a/auto-generated/gnu-overloaded-tests/vnsrl.c
+++ b/auto-generated/gnu-overloaded-tests/vnsrl.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vs2, vs1, vl);
 }
 
-vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(op1, shift, vl);
+vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl(mask, op1, shift, vl);
+vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsrl\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vor.c b/auto-generated/gnu-overloaded-tests/vor.c
index a3481f156..b0d0474d2 100644
--- a/auto-generated/gnu-overloaded-tests/vor.c
+++ b/auto-generated/gnu-overloaded-tests/vor.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m1_t test_vor_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m1_t test_vor_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m2_t test_vor_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m2_t test_vor_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m4_t test_vor_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m4_t test_vor_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m8_t test_vor_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint8m8_t test_vor_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m1_t test_vor_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m1_t test_vor_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m2_t test_vor_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m2_t test_vor_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m4_t test_vor_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m4_t test_vor_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m8_t test_vor_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint16m8_t test_vor_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m1_t test_vor_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m1_t test_vor_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m2_t test_vor_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m2_t test_vor_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m4_t test_vor_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m4_t test_vor_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m8_t test_vor_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint32m8_t test_vor_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m1_t test_vor_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m1_t test_vor_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m2_t test_vor_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m2_t test_vor_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m4_t test_vor_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m4_t test_vor_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m8_t test_vor_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vint64m8_t test_vor_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m1_t test_vor_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m1_t test_vor_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m2_t test_vor_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m2_t test_vor_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m4_t test_vor_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m4_t test_vor_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m8_t test_vor_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint8m8_t test_vor_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m1_t test_vor_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m1_t test_vor_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m2_t test_vor_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m2_t test_vor_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m4_t test_vor_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m4_t test_vor_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m8_t test_vor_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint16m8_t test_vor_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m1_t test_vor_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m1_t test_vor_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m2_t test_vor_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m2_t test_vor_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m4_t test_vor_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m4_t test_vor_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m8_t test_vor_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint32m8_t test_vor_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m1_t test_vor_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m1_t test_vor_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m2_t test_vor_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m2_t test_vor_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m4_t test_vor_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m4_t test_vor_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m8_t test_vor_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vor(vs2, vs1, vl);
 }
 
-vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor(op1, op2, vl);
+vuint64m8_t test_vor_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor(vs2, rs1, vl);
 }
 
-vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m1_t test_vor_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m1_t test_vor_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m2_t test_vor_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m2_t test_vor_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m4_t test_vor_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m4_t test_vor_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m8_t test_vor_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint8m8_t test_vor_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m1_t test_vor_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m1_t test_vor_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m2_t test_vor_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m2_t test_vor_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m4_t test_vor_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m4_t test_vor_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m8_t test_vor_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint16m8_t test_vor_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m1_t test_vor_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m1_t test_vor_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m2_t test_vor_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m2_t test_vor_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m4_t test_vor_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m4_t test_vor_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m8_t test_vor_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint32m8_t test_vor_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint64m1_t test_vor_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint64m1_t test_vor_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint64m2_t test_vor_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint64m2_t test_vor_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint64m4_t test_vor_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vor(vm, vs2, vs1, vl);
 }
 
-vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor(mask, op1, op2, vl);
+vint64m4_t test_vor_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor(vm, vs2, rs1, vl);
 }
 
-vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_m(vbool2_t vm, 
vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, 
size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vor(vm, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor(vm, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor(mask, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vor\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vredand.c b/auto-generated/gnu-overloaded-tests/vredand.c index c714b655f..ae8872d85 100644 --- a/auto-generated/gnu-overloaded-tests/vredand.c +++ b/auto-generated/gnu-overloaded-tests/vredand.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, 
size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, 
vint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint16m1_t 
test_vredand_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); 
+vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t 
vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, 
vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, 
vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredand\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vredmax.c b/auto-generated/gnu-overloaded-tests/vredmax.c index a3ec1d49c..0a8edc88d 100644 --- a/auto-generated/gnu-overloaded-tests/vredmax.c +++ b/auto-generated/gnu-overloaded-tests/vredmax.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - 
return __riscv_vredmax(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return 
__riscv_vredmax(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, 
scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } 
-vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmax\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vredmaxu.c b/auto-generated/gnu-overloaded-tests/vredmaxu.c index 5d9e71fab..308f17a7b 100644 --- a/auto-generated/gnu-overloaded-tests/vredmaxu.c +++ b/auto-generated/gnu-overloaded-tests/vredmaxu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + 
return __riscv_vredmaxu(vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t 
vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return 
__riscv_vredmaxu(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint64m1_t 
test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu(mask, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmaxu\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vredmin.c b/auto-generated/gnu-overloaded-tests/vredmin.c index d0212e333..805312709 100644 --- a/auto-generated/gnu-overloaded-tests/vredmin.c +++ b/auto-generated/gnu-overloaded-tests/vredmin.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint16m1_t 
test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, 
size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } 
-vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmin\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vredminu.c b/auto-generated/gnu-overloaded-tests/vredminu.c index 70de6ac03..8fe5e1b8c 100644 --- a/auto-generated/gnu-overloaded-tests/vredminu.c +++ b/auto-generated/gnu-overloaded-tests/vredminu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint8m1_t 
test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint32m1_t 
test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return 
__riscv_vredminu(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint32m1_t 
test_vredminu_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredminu\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vredor.c b/auto-generated/gnu-overloaded-tests/vredor.c index 42a0f5586..d9b6f3af9 100644 --- a/auto-generated/gnu-overloaded-tests/vredor.c +++ b/auto-generated/gnu-overloaded-tests/vredor.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } 
-vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vs2, 
vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - 
return __riscv_vredor(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor(vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint8m1_t 
test_vredor_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return 
__riscv_vredor(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor(vm, vs2, vs1, vl); } -vuint8m1_t 
test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredor(mask, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredor(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredor\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vredsum.c b/auto-generated/gnu-overloaded-tests/vredsum.c
index 4ac408fae..c34672f6a 100644
--- a/auto-generated/gnu-overloaded-tests/vredsum.c
+++ b/auto-generated/gnu-overloaded-tests/vredsum.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum(mask, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredsum\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vredxor.c b/auto-generated/gnu-overloaded-tests/vredxor.c
index 3139825b6..4cca4c3a7 100644
--- a/auto-generated/gnu-overloaded-tests/vredxor.c
+++ b/auto-generated/gnu-overloaded-tests/vredxor.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t vm, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t vm, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t vm, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t vm, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t vm, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t vm, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t vm, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t vm, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t vm, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t vm, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t vm, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t vm, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t vm, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t vm, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t vm, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t vm, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t vm, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t vm, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t vm, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t vm, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t vm, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t vm, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t vm, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t vm, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t vm, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t vm, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t vm, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredxor\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vrem.c b/auto-generated/gnu-overloaded-tests/vrem.c
index d1ae45e6d..f2b2f65fc 100644
--- a/auto-generated/gnu-overloaded-tests/vrem.c
+++ b/auto-generated/gnu-overloaded-tests/vrem.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m1_t test_vrem_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m1_t test_vrem_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m2_t test_vrem_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m1_t test_vrem_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m2_t test_vrem_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m2_t test_vrem_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m4_t test_vrem_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m4_t test_vrem_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m8_t test_vrem_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint16m8_t test_vrem_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m1_t test_vrem_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m1_t test_vrem_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m2_t test_vrem_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m2_t test_vrem_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m4_t test_vrem_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m4_t test_vrem_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m8_t test_vrem_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint32m8_t test_vrem_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m1_t test_vrem_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m1_t test_vrem_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m2_t test_vrem_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m2_t test_vrem_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m4_t test_vrem_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vrem(vs2, vs1, vl);
 }
 
-vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem(op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem(vs2, rs1, vl);
 }
 
-vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m1_t test_vrem_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m1_t test_vrem_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m2_t test_vrem_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16m1_t test_vrem_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16m2_t test_vrem_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16m2_t test_vrem_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16m4_t test_vrem_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16m4_t test_vrem_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem(vm, vs2, rs1, vl);
 }
 
-vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vrem(mask, op1, op2, vl);
+vint16m8_t
test_vrem_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m4_t test_vrem_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m1_t test_vrem_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, 
int64_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m2_t test_vrem_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m4_t test_vrem_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m4_t test_vrem_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m8_t test_vrem_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrem(vm, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrem(mask, op1, op2, vl); +vint64m8_t test_vrem_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vrem\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vremu.c b/auto-generated/gnu-overloaded-tests/vremu.c index 48fe5ef26..4dee37859 100644 --- a/auto-generated/gnu-overloaded-tests/vremu.c +++ b/auto-generated/gnu-overloaded-tests/vremu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, 
size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); 
+vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m1_t 
test_vremu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vremu(vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vs2, rs1, vl); } -vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); 
+vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vremu(mask, op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint32m4_t 
test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vremu(vm, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu(mask, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vremu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vrgather.c b/auto-generated/gnu-overloaded-tests/vrgather.c index bee1cbe1a..24b71da94 100644 --- 
a/auto-generated/gnu-overloaded-tests/vrgather.c +++ b/auto-generated/gnu-overloaded-tests/vrgather.c @@ -6,948 +6,948 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32mf2_t 
test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m4_t 
test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1(vint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2(vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m4_t 
test_vrgather_vx_i8m4(vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8(vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1(vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2(vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4(vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8(vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } 
-vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1(vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2(vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4(vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8(vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1(vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2(vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, size_t vl) { - return 
__riscv_vrgather(op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4(vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8(vint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + 
return __riscv_vrgather(vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint16m8_t 
test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, 
size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t vs1, size_t vl) { + return 
__riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t vs1, size_t vl) 
{ + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t vs1, size_t vl) { + return 
__riscv_vrgather(vm, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return 
__riscv_vrgather(mask, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32mf2_t 
test_vrgather_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { 
+ return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m1_t 
test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t index, 
size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t index, size_t vl) { - return 
__riscv_vrgather(mask, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather(vm, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather(mask, op1, index, vl); 
+vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather(vm, vs2, vs1, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgather\.[ivxfswum.]+\s+} 236 } } */
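The rename applied throughout these hunks is uniform: op1 (data), index/op2 (indices, or the scalar in the vx forms), and mask become vs2, vs1, and vm, following the operand names of the underlying instruction (vrgather.vv vd, vs2, vs1, with vm naming the mask in the _m variants, where vd[i] = vs2[vs1[i]]). A minimal sketch of the convention in use, assuming only <riscv_vector.h> and the overloaded API exercised above; the reverse helper and its use of vid/vrsub are illustrative, not taken from the generated tests:

#include <riscv_vector.h>

/* Illustrative only: reverse the first vl elements of vs2 by building
   the index vector vl-1, ..., 1, 0 in vs1 and gathering through it. */
vint32m1_t reverse_i32m1(vint32m1_t vs2, size_t vl) {
  vuint32m1_t vid = __riscv_vid_v_u32m1(vl);                     /* 0, 1, ..., vl-1 */
  vuint32m1_t vs1 = __riscv_vrsub(vid, (uint32_t)(vl - 1), vl);  /* vl-1, ..., 1, 0 */
  return __riscv_vrgather(vs2, vs1, vl);                         /* vd[i] = vs2[vs1[i]] */
}

The masked form keeps the same order with the mask first, __riscv_vrgather(vm, vs2, vs1, vl), as the _m tests above show.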
diff --git a/auto-generated/gnu-overloaded-tests/vrgatherei16.c b/auto-generated/gnu-overloaded-tests/vrgatherei16.c
index 0f7d544d1..00d05a20a 100644
--- a/auto-generated/gnu-overloaded-tests/vrgatherei16.c
+++ b/auto-generated/gnu-overloaded-tests/vrgatherei16.c
@@ -6,460 +6,460 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4(vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2(vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat16m1_t test_vrgatherei16_vv_f16m1(vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat16m2_t test_vrgatherei16_vv_f16m2(vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat16m4_t test_vrgatherei16_vv_f16m4(vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat16m8_t test_vrgatherei16_vv_f16m8(vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
-vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2,
vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return 
__riscv_vrgatherei16(op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16(vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16(op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t 
vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
 
-vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
 
-vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
 
-vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
 
-vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
 
-vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(op1, op2, vl);
+vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat16m1_t test_vrgatherei16_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat16m2_t test_vrgatherei16_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat16m4_t test_vrgatherei16_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat16m8_t test_vrgatherei16_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16(mask, op1, op2, vl);
+vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16(vm, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgatherei16\.[ivxfswum.]+\s+} 114 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vrsub.c b/auto-generated/gnu-overloaded-tests/vrsub.c
index ac1d6335e..9c22a6253 100644
--- a/auto-generated/gnu-overloaded-tests/vrsub.c
+++ b/auto-generated/gnu-overloaded-tests/vrsub.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint8m1_t test_vrsub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint8m2_t test_vrsub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint8m4_t test_vrsub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint8m8_t test_vrsub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint16m1_t test_vrsub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint16m2_t test_vrsub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint16m4_t test_vrsub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint16m8_t test_vrsub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint32m1_t test_vrsub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint32m2_t test_vrsub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint32m4_t test_vrsub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint32m8_t test_vrsub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint64m1_t test_vrsub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint64m2_t test_vrsub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint64m4_t test_vrsub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vint64m8_t test_vrsub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(op1, op2, vl);
+vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vs2, rs1, vl);
 }
 
-vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
  + return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  + return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  + return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub(mask, op1, op2, vl);
+vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vrsub\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsadd.c b/auto-generated/gnu-overloaded-tests/vsadd.c
index d75451e16..0f6a336a9 100644
--- a/auto-generated/gnu-overloaded-tests/vsadd.c
+++ b/auto-generated/gnu-overloaded-tests/vsadd.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m1_t test_vsadd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m1_t test_vsadd_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m2_t test_vsadd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m2_t test_vsadd_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m4_t test_vsadd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m4_t test_vsadd_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m8_t test_vsadd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint8m8_t test_vsadd_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m1_t test_vsadd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m1_t test_vsadd_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m2_t test_vsadd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m2_t test_vsadd_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m4_t test_vsadd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m4_t test_vsadd_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m8_t test_vsadd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint16m8_t test_vsadd_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m1_t test_vsadd_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m1_t test_vsadd_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m2_t test_vsadd_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m2_t test_vsadd_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m4_t test_vsadd_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m4_t test_vsadd_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m8_t test_vsadd_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint32m8_t test_vsadd_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m1_t test_vsadd_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m1_t test_vsadd_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m2_t test_vsadd_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m2_t test_vsadd_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m4_t test_vsadd_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m4_t test_vsadd_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m8_t test_vsadd_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsadd(vs2, vs1, vl);
 }
 
-vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(op1, op2, vl);
+vint64m8_t test_vsadd_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vs2, rs1, vl);
 }
 
-vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
  + return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
-vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, vs1, vl);
 }
 
-vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd(mask, op1, op2, vl);
+vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsadd\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsaddu.c b/auto-generated/gnu-overloaded-tests/vsaddu.c
index 90f95b1f5..f2556fcdd 100644
--- a/auto-generated/gnu-overloaded-tests/vsaddu.c
+++ b/auto-generated/gnu-overloaded-tests/vsaddu.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
 
-vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsaddu(vs2, rs1, vl);
 }
 
-vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vsaddu(op1, op2, vl);
+vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsaddu(vs2, vs1, vl);
 }
-vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu(vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu(vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu(vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, 
vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t 
vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) 
{ - return __riscv_vsaddu(mask, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu(mask, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsaddu\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsbc.c b/auto-generated/gnu-overloaded-tests/vsbc.c index 88c8ed354..c212a5480 100644 --- a/auto-generated/gnu-overloaded-tests/vsbc.c +++ b/auto-generated/gnu-overloaded-tests/vsbc.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t 
borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, 
op2, borrowin, vl); +vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, 
size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, 
vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint8m4_t 
test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t vs2, vuint16m4_t 
vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) { - return 
__riscv_vsbc(op1, op2, borrowin, vl); +vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } -vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, vs1, v0, vl); } -vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc(op1, op2, borrowin, vl); +vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc(vs2, rs1, v0, vl); } /* { dg-final { scan-assembler-times {vsbc\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vse16.c b/auto-generated/gnu-overloaded-tests/vse16.c index 03408842d..23e079a70 100644 --- a/auto-generated/gnu-overloaded-tests/vse16.c +++ b/auto-generated/gnu-overloaded-tests/vse16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse16_v_f16mf4(float16_t *base, vfloat16mf4_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_f16mf4(float16_t *rs1, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_f16mf2(float16_t *base, vfloat16mf2_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_f16mf2(float16_t *rs1, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_f16m1(float16_t *base, vfloat16m1_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_f16m1(float16_t *rs1, vfloat16m1_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_f16m2(float16_t 
*base, vfloat16m2_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_f16m2(float16_t *rs1, vfloat16m2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_f16m4(float16_t *base, vfloat16m4_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_f16m4(float16_t *rs1, vfloat16m4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_f16m8(float16_t *base, vfloat16m8_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_f16m8(float16_t *rs1, vfloat16m8_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_i16mf4(int16_t *rs1, vint16mf4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_i16mf2(int16_t *rs1, vint16mf2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_i16m1(int16_t *rs1, vint16m1_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_i16m2(int16_t *rs1, vint16m2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_i16m4(int16_t *rs1, vint16m4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_i16m8(int16_t *rs1, vint16m8_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_u16mf4(uint16_t *rs1, vuint16mf4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_u16mf2(uint16_t *rs1, vuint16mf2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_u16m1(uint16_t *rs1, vuint16m1_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_u16m2(uint16_t *rs1, vuint16m2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_u16m4(uint16_t *rs1, vuint16m4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { - return __riscv_vse16(base, value, vl); +void test_vse16_v_u16m8(uint16_t *rs1, vuint16m8_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); } -void test_vse16_v_f16mf4_m(vbool64_t mask, float16_t *base, vfloat16mf4_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vfloat16mf4_t vs3, size_t 
vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_f16mf2_m(vbool32_t mask, float16_t *base, vfloat16mf2_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_f16m1_m(vbool16_t mask, float16_t *base, vfloat16m1_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_f16m1_m(vbool16_t vm, float16_t *rs1, vfloat16m1_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_f16m2_m(vbool8_t mask, float16_t *base, vfloat16m2_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_f16m2_m(vbool8_t vm, float16_t *rs1, vfloat16m2_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_f16m4_m(vbool4_t mask, float16_t *base, vfloat16m4_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_f16m4_m(vbool4_t vm, float16_t *rs1, vfloat16m4_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_f16m8_m(vbool2_t mask, float16_t *base, vfloat16m8_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_f16m8_m(vbool2_t vm, float16_t *rs1, vfloat16m8_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vint16mf4_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vint16mf2_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_i16m1_m(vbool16_t vm, int16_t *rs1, vint16m1_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_i16m2_m(vbool8_t vm, int16_t *rs1, vint16m2_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_i16m4_m(vbool4_t vm, int16_t *rs1, vint16m4_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_i16m8_m(vbool2_t vm, int16_t *rs1, vint16m8_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void 
test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } -void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { - return __riscv_vse16(mask, base, value, vl); +void test_vse16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint16m8_t vs3, size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vse32.c b/auto-generated/gnu-overloaded-tests/vse32.c index 71180ab6c..724b1c3f5 100644 --- a/auto-generated/gnu-overloaded-tests/vse32.c +++ b/auto-generated/gnu-overloaded-tests/vse32.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse32_v_f32mf2(float32_t *base, vfloat32mf2_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_f32mf2(float32_t *rs1, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_f32m1(float32_t *base, vfloat32m1_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_f32m1(float32_t *rs1, vfloat32m1_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_f32m2(float32_t *base, vfloat32m2_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_f32m2(float32_t *rs1, vfloat32m2_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_f32m4(float32_t *base, vfloat32m4_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_f32m4(float32_t *rs1, vfloat32m4_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_f32m8(float32_t *base, vfloat32m8_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_f32m8(float32_t *rs1, vfloat32m8_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_i32mf2(int32_t *rs1, vint32mf2_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_i32m1(int32_t *rs1, vint32m1_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_i32m2(int32_t *rs1, vint32m2_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_i32m4(int32_t *rs1, vint32m4_t vs3, size_t vl) { + return 
__riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_i32m8(int32_t *rs1, vint32m8_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_u32mf2(uint32_t *rs1, vuint32mf2_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_u32m1(uint32_t *rs1, vuint32m1_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_u32m2(uint32_t *rs1, vuint32m2_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_u32m4(uint32_t *rs1, vuint32m4_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { - return __riscv_vse32(base, value, vl); +void test_vse32_v_u32m8(uint32_t *rs1, vuint32m8_t vs3, size_t vl) { + return __riscv_vse32(rs1, vs3, vl); } -void test_vse32_v_f32mf2_m(vbool64_t mask, float32_t *base, vfloat32mf2_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_f32m1_m(vbool32_t mask, float32_t *base, vfloat32m1_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_f32m1_m(vbool32_t vm, float32_t *rs1, vfloat32m1_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_f32m2_m(vbool16_t mask, float32_t *base, vfloat32m2_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_f32m2_m(vbool16_t vm, float32_t *rs1, vfloat32m2_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_f32m4_m(vbool8_t mask, float32_t *base, vfloat32m4_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_f32m4_m(vbool8_t vm, float32_t *rs1, vfloat32m4_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_f32m8_m(vbool4_t mask, float32_t *base, vfloat32m8_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_f32m8_m(vbool4_t vm, float32_t *rs1, vfloat32m8_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vint32mf2_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_i32m1_m(vbool32_t vm, int32_t *rs1, vint32m1_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_i32m2_m(vbool16_t vm, int32_t *rs1, vint32m2_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void 
test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_i32m4_m(vbool8_t vm, int32_t *rs1, vint32m4_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_i32m8_m(vbool4_t vm, int32_t *rs1, vint32m8_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } -void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) { - return __riscv_vse32(mask, base, value, vl); +void test_vse32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint32m8_t vs3, size_t vl) { + return __riscv_vse32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse32\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vse64.c b/auto-generated/gnu-overloaded-tests/vse64.c index 993e65104..023acb2d8 100644 --- a/auto-generated/gnu-overloaded-tests/vse64.c +++ b/auto-generated/gnu-overloaded-tests/vse64.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse64_v_f64m1(float64_t *base, vfloat64m1_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_f64m1(float64_t *rs1, vfloat64m1_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_f64m2(float64_t *base, vfloat64m2_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_f64m2(float64_t *rs1, vfloat64m2_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_f64m4(float64_t *base, vfloat64m4_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_f64m4(float64_t *rs1, vfloat64m4_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_f64m8(float64_t *base, vfloat64m8_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_f64m8(float64_t *rs1, vfloat64m8_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_i64m1(int64_t *rs1, vint64m1_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { - 
return __riscv_vse64(base, value, vl); +void test_vse64_v_i64m2(int64_t *rs1, vint64m2_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_i64m4(int64_t *rs1, vint64m4_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_i64m8(int64_t *rs1, vint64m8_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_u64m1(uint64_t *rs1, vuint64m1_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_u64m2(uint64_t *rs1, vuint64m2_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_u64m4(uint64_t *rs1, vuint64m4_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { - return __riscv_vse64(base, value, vl); +void test_vse64_v_u64m8(uint64_t *rs1, vuint64m8_t vs3, size_t vl) { + return __riscv_vse64(rs1, vs3, vl); } -void test_vse64_v_f64m1_m(vbool64_t mask, float64_t *base, vfloat64m1_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_f64m1_m(vbool64_t vm, float64_t *rs1, vfloat64m1_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_f64m2_m(vbool32_t mask, float64_t *base, vfloat64m2_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_f64m2_m(vbool32_t vm, float64_t *rs1, vfloat64m2_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_f64m4_m(vbool16_t mask, float64_t *base, vfloat64m4_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_f64m4_m(vbool16_t vm, float64_t *rs1, vfloat64m4_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_f64m8_m(vbool8_t mask, float64_t *base, vfloat64m8_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_f64m8_m(vbool8_t vm, float64_t *rs1, vfloat64m8_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_i64m1_m(vbool64_t vm, int64_t *rs1, vint64m1_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_i64m2_m(vbool32_t vm, int64_t *rs1, vint64m2_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_i64m4_m(vbool16_t vm, int64_t *rs1, vint64m4_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_i64m8_m(vbool8_t vm, int64_t *rs1, 
vint64m8_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } -void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) { - return __riscv_vse64(mask, base, value, vl); +void test_vse64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint64m8_t vs3, size_t vl) { + return __riscv_vse64(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse64\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vse8.c b/auto-generated/gnu-overloaded-tests/vse8.c index 34c8e8b68..5bc331696 100644 --- a/auto-generated/gnu-overloaded-tests/vse8.c +++ b/auto-generated/gnu-overloaded-tests/vse8.c @@ -6,116 +6,116 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_i8mf8(int8_t *rs1, vint8mf8_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_i8mf4(int8_t *rs1, vint8mf4_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_i8mf2(int8_t *rs1, vint8mf2_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_i8m1(int8_t *rs1, vint8m1_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_i8m2(int8_t *rs1, vint8m2_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_i8m4(int8_t *rs1, vint8m4_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_i8m8(int8_t *rs1, vint8m8_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_u8mf8(uint8_t *rs1, vuint8mf8_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_u8mf4(uint8_t *rs1, vuint8mf4_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void 
test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_u8mf2(uint8_t *rs1, vuint8mf2_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_u8m1(uint8_t *rs1, vuint8m1_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_u8m2(uint8_t *rs1, vuint8m2_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_u8m4(uint8_t *rs1, vuint8m4_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { - return __riscv_vse8(base, value, vl); +void test_vse8_v_u8m8(uint8_t *rs1, vuint8m8_t vs3, size_t vl) { + return __riscv_vse8(rs1, vs3, vl); } -void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vint8mf8_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vint8mf4_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vint8mf2_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_i8m1_m(vbool8_t vm, int8_t *rs1, vint8m1_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_i8m2_m(vbool4_t vm, int8_t *rs1, vint8m2_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_i8m4_m(vbool2_t vm, int8_t *rs1, vint8m4_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_i8m8_m(vbool1_t vm, int8_t *rs1, vint8m8_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_u8mf2_m(vbool16_t vm, 
uint8_t *rs1, vuint8mf2_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } -void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { - return __riscv_vse8(mask, base, value, vl); +void test_vse8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, vuint8m8_t vs3, size_t vl) { + return __riscv_vse8(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vse8\.[ivxfswum.]+\s+} 28 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vset.c b/auto-generated/gnu-overloaded-tests/vset.c index 87b6e6b85..5d9df76fe 100644 --- a/auto-generated/gnu-overloaded-tests/vset.c +++ b/auto-generated/gnu-overloaded-tests/vset.c @@ -6,1172 +6,1172 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m2_t test_vset_v_f16m1_f16m2(vfloat16m2_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m4_t test_vset_v_f16m1_f16m4(vfloat16m4_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m4_t test_vset_v_f16m2_f16m4(vfloat16m4_t dest, size_t index, vfloat16m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m8_t test_vset_v_f16m1_f16m8(vfloat16m8_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m8_t test_vset_v_f16m2_f16m8(vfloat16m8_t dest, size_t index, vfloat16m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m8_t test_vset_v_f16m4_f16m8(vfloat16m8_t dest, size_t index, vfloat16m4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, 
val); +vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t value) { + return __riscv_vset(dest, 0, value); } -vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) { - return __riscv_vset(dest, 0, val); +vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t value) { + return __riscv_vset(dest, 0, value); } -vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) { - return __riscv_vset(dest, 0, val); +vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t value) { + return __riscv_vset(dest, 0, value); } -vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) { - return __riscv_vset(dest, 0, val); +vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t value) { + return __riscv_vset(dest, 0, value); } -vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t 
val) { - return __riscv_vset(dest, 0, val); +vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t value) { + return __riscv_vset(dest, 0, value); } -vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) { - return __riscv_vset(dest, 0, val); +vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t value) { + return __riscv_vset(dest, 0, value); } -vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) { - return __riscv_vset(dest, 0, val); +vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t value) { + return __riscv_vset(dest, 0, value); } -vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) { - return __riscv_vset(dest, 0, val); +vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t value) { + return __riscv_vset(dest, 0, value); } -vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) { - return __riscv_vset(dest, 0, val); +vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t value) { + return __riscv_vset(dest, 0, value); } -vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) { - return __riscv_vset(dest, 0, val); +vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t value) { + return __riscv_vset(dest, 0, value); } -vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) { - return __riscv_vset(dest, 0, val); +vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t value) { + return __riscv_vset(dest, 0, value); } -vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) { - return __riscv_vset(dest, 0, val); +vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t value) { + return __riscv_vset(dest, 0, value); } -vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) { - return __riscv_vset(dest, 0, val); +vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t value) { + return __riscv_vset(dest, 0, value); } -vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) { - return __riscv_vset(dest, 0, val); +vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t value) { + return __riscv_vset(dest, 0, value); } -vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) { - return __riscv_vset(dest, 0, val); +vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t value) { + return __riscv_vset(dest, 0, value); } -vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) { - return __riscv_vset(dest, 0, val); +vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t value) { + return __riscv_vset(dest, 0, value); } -vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) { - return __riscv_vset(dest, 0, val); +vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t value) { + return __riscv_vset(dest, 0, value); } -vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) { - return __riscv_vset(dest, 0, val); +vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t value) { + return __riscv_vset(dest, 0, value); } -vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) { - return __riscv_vset(dest, 0, val); +vint32m8_t 
test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t value) { + return __riscv_vset(dest, 0, value); } -vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) { - return __riscv_vset(dest, 0, val); +vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t value) { + return __riscv_vset(dest, 0, value); } -vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) { - return __riscv_vset(dest, 0, val); +vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t value) { + return __riscv_vset(dest, 0, value); } -vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) { - return __riscv_vset(dest, 0, val); +vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t value) { + return __riscv_vset(dest, 0, value); } -vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) { - return __riscv_vset(dest, 0, val); +vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t value) { + return __riscv_vset(dest, 0, value); } -vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) { - return __riscv_vset(dest, 0, val); +vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t value) { + return __riscv_vset(dest, 0, value); } -vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) { - return __riscv_vset(dest, 0, val); +vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t value) { + return __riscv_vset(dest, 0, value); } -vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) { - return __riscv_vset(dest, 0, val); +vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) { - return __riscv_vset(dest, 0, val); +vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) { - return __riscv_vset(dest, 0, val); +vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) { - return __riscv_vset(dest, 0, val); +vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) { - return __riscv_vset(dest, 0, val); +vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) { - return __riscv_vset(dest, 0, val); +vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t value) { + return __riscv_vset(dest, 0, value); } -vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset(dest, 0, val); +vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset(dest, 0, val); +vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t 
value) { + return __riscv_vset(dest, 0, value); } -vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) { - return __riscv_vset(dest, 0, val); +vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) { - return __riscv_vset(dest, 0, val); +vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) { - return __riscv_vset(dest, 0, val); +vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) { - return __riscv_vset(dest, 0, val); +vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t value) { + return __riscv_vset(dest, 0, value); } -vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset(dest, 0, val); +vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset(dest, 0, val); +vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) { - return __riscv_vset(dest, 0, val); +vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) { - return __riscv_vset(dest, 0, val); +vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) { - return __riscv_vset(dest, 0, val); +vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) { - return __riscv_vset(dest, 0, val); +vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t value) { + return __riscv_vset(dest, 0, value); } -vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset(dest, 0, val); +vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset(dest, 0, val); +vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) { - return __riscv_vset(dest, 0, val); +vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) { - return __riscv_vset(dest, 0, val); +vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t 
dest, size_t index, vuint64m1_t value) { + return __riscv_vset(dest, 0, value); } -vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) { - return __riscv_vset(dest, 0, val); +vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t value) { + return __riscv_vset(dest, 0, value); } -vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) { - return __riscv_vset(dest, 0, val); +vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf4x2_t test_vset_v_f16mf4_f16mf4x2(vfloat16mf4x2_t dest, size_t index, vfloat16mf4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf4x2_t test_vset_v_f16mf4_f16mf4x2(vfloat16mf4x2_t dest, size_t index, vfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf4x3_t test_vset_v_f16mf4_f16mf4x3(vfloat16mf4x3_t dest, size_t index, vfloat16mf4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf4x3_t test_vset_v_f16mf4_f16mf4x3(vfloat16mf4x3_t dest, size_t index, vfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf4x4_t test_vset_v_f16mf4_f16mf4x4(vfloat16mf4x4_t dest, size_t index, vfloat16mf4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf4x4_t test_vset_v_f16mf4_f16mf4x4(vfloat16mf4x4_t dest, size_t index, vfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf4x5_t test_vset_v_f16mf4_f16mf4x5(vfloat16mf4x5_t dest, size_t index, vfloat16mf4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf4x5_t test_vset_v_f16mf4_f16mf4x5(vfloat16mf4x5_t dest, size_t index, vfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf4x6_t test_vset_v_f16mf4_f16mf4x6(vfloat16mf4x6_t dest, size_t index, vfloat16mf4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf4x6_t test_vset_v_f16mf4_f16mf4x6(vfloat16mf4x6_t dest, size_t index, vfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf4x7_t test_vset_v_f16mf4_f16mf4x7(vfloat16mf4x7_t dest, size_t index, vfloat16mf4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf4x7_t test_vset_v_f16mf4_f16mf4x7(vfloat16mf4x7_t dest, size_t index, vfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf4x8_t test_vset_v_f16mf4_f16mf4x8(vfloat16mf4x8_t dest, size_t index, vfloat16mf4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf4x8_t test_vset_v_f16mf4_f16mf4x8(vfloat16mf4x8_t dest, size_t index, vfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf2x2_t test_vset_v_f16mf2_f16mf2x2(vfloat16mf2x2_t dest, size_t index, vfloat16mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf2x2_t test_vset_v_f16mf2_f16mf2x2(vfloat16mf2x2_t dest, size_t index, vfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf2x3_t test_vset_v_f16mf2_f16mf2x3(vfloat16mf2x3_t dest, size_t index, vfloat16mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf2x3_t test_vset_v_f16mf2_f16mf2x3(vfloat16mf2x3_t dest, size_t index, vfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf2x4_t test_vset_v_f16mf2_f16mf2x4(vfloat16mf2x4_t dest, size_t index, vfloat16mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf2x4_t test_vset_v_f16mf2_f16mf2x4(vfloat16mf2x4_t dest, size_t index, vfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf2x5_t test_vset_v_f16mf2_f16mf2x5(vfloat16mf2x5_t dest, size_t index, vfloat16mf2_t val) { - return 
__riscv_vset(dest, 0, val); +vfloat16mf2x5_t test_vset_v_f16mf2_f16mf2x5(vfloat16mf2x5_t dest, size_t index, vfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf2x6_t test_vset_v_f16mf2_f16mf2x6(vfloat16mf2x6_t dest, size_t index, vfloat16mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf2x6_t test_vset_v_f16mf2_f16mf2x6(vfloat16mf2x6_t dest, size_t index, vfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf2x7_t test_vset_v_f16mf2_f16mf2x7(vfloat16mf2x7_t dest, size_t index, vfloat16mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf2x7_t test_vset_v_f16mf2_f16mf2x7(vfloat16mf2x7_t dest, size_t index, vfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16mf2x8_t test_vset_v_f16mf2_f16mf2x8(vfloat16mf2x8_t dest, size_t index, vfloat16mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16mf2x8_t test_vset_v_f16mf2_f16mf2x8(vfloat16mf2x8_t dest, size_t index, vfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m1x2_t test_vset_v_f16m1_f16m1x2(vfloat16m1x2_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m1x2_t test_vset_v_f16m1_f16m1x2(vfloat16m1x2_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m1x3_t test_vset_v_f16m1_f16m1x3(vfloat16m1x3_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m1x3_t test_vset_v_f16m1_f16m1x3(vfloat16m1x3_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m1x4_t test_vset_v_f16m1_f16m1x4(vfloat16m1x4_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m1x4_t test_vset_v_f16m1_f16m1x4(vfloat16m1x4_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m1x5_t test_vset_v_f16m1_f16m1x5(vfloat16m1x5_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m1x5_t test_vset_v_f16m1_f16m1x5(vfloat16m1x5_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m1x6_t test_vset_v_f16m1_f16m1x6(vfloat16m1x6_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m1x6_t test_vset_v_f16m1_f16m1x6(vfloat16m1x6_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m1x7_t test_vset_v_f16m1_f16m1x7(vfloat16m1x7_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m1x7_t test_vset_v_f16m1_f16m1x7(vfloat16m1x7_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m1x8_t test_vset_v_f16m1_f16m1x8(vfloat16m1x8_t dest, size_t index, vfloat16m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m1x8_t test_vset_v_f16m1_f16m1x8(vfloat16m1x8_t dest, size_t index, vfloat16m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m2x2_t test_vset_v_f16m2_f16m2x2(vfloat16m2x2_t dest, size_t index, vfloat16m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m2x2_t test_vset_v_f16m2_f16m2x2(vfloat16m2x2_t dest, size_t index, vfloat16m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m2x3_t test_vset_v_f16m2_f16m2x3(vfloat16m2x3_t dest, size_t index, vfloat16m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m2x3_t test_vset_v_f16m2_f16m2x3(vfloat16m2x3_t dest, size_t index, vfloat16m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m2x4_t test_vset_v_f16m2_f16m2x4(vfloat16m2x4_t dest, 
size_t index, vfloat16m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m2x4_t test_vset_v_f16m2_f16m2x4(vfloat16m2x4_t dest, size_t index, vfloat16m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat16m4x2_t test_vset_v_f16m4_f16m4x2(vfloat16m4x2_t dest, size_t index, vfloat16m4_t val) { - return __riscv_vset(dest, 0, val); +vfloat16m4x2_t test_vset_v_f16m4_f16m4x2(vfloat16m4x2_t dest, size_t index, vfloat16m4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32mf2x2_t test_vset_v_f32mf2_f32mf2x2(vfloat32mf2x2_t dest, size_t index, vfloat32mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32mf2x2_t test_vset_v_f32mf2_f32mf2x2(vfloat32mf2x2_t dest, size_t index, vfloat32mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32mf2x3_t test_vset_v_f32mf2_f32mf2x3(vfloat32mf2x3_t dest, size_t index, vfloat32mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32mf2x3_t test_vset_v_f32mf2_f32mf2x3(vfloat32mf2x3_t dest, size_t index, vfloat32mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32mf2x4_t test_vset_v_f32mf2_f32mf2x4(vfloat32mf2x4_t dest, size_t index, vfloat32mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32mf2x4_t test_vset_v_f32mf2_f32mf2x4(vfloat32mf2x4_t dest, size_t index, vfloat32mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32mf2x5_t test_vset_v_f32mf2_f32mf2x5(vfloat32mf2x5_t dest, size_t index, vfloat32mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32mf2x5_t test_vset_v_f32mf2_f32mf2x5(vfloat32mf2x5_t dest, size_t index, vfloat32mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32mf2x6_t test_vset_v_f32mf2_f32mf2x6(vfloat32mf2x6_t dest, size_t index, vfloat32mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32mf2x6_t test_vset_v_f32mf2_f32mf2x6(vfloat32mf2x6_t dest, size_t index, vfloat32mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32mf2x7_t test_vset_v_f32mf2_f32mf2x7(vfloat32mf2x7_t dest, size_t index, vfloat32mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32mf2x7_t test_vset_v_f32mf2_f32mf2x7(vfloat32mf2x7_t dest, size_t index, vfloat32mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32mf2x8_t test_vset_v_f32mf2_f32mf2x8(vfloat32mf2x8_t dest, size_t index, vfloat32mf2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32mf2x8_t test_vset_v_f32mf2_f32mf2x8(vfloat32mf2x8_t dest, size_t index, vfloat32mf2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m1x2_t test_vset_v_f32m1_f32m1x2(vfloat32m1x2_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m1x2_t test_vset_v_f32m1_f32m1x2(vfloat32m1x2_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m1x3_t test_vset_v_f32m1_f32m1x3(vfloat32m1x3_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m1x3_t test_vset_v_f32m1_f32m1x3(vfloat32m1x3_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m1x4_t test_vset_v_f32m1_f32m1x4(vfloat32m1x4_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m1x4_t test_vset_v_f32m1_f32m1x4(vfloat32m1x4_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m1x5_t test_vset_v_f32m1_f32m1x5(vfloat32m1x5_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m1x5_t test_vset_v_f32m1_f32m1x5(vfloat32m1x5_t dest, size_t index, vfloat32m1_t value) { + return 
__riscv_vset(dest, 0, value); } -vfloat32m1x6_t test_vset_v_f32m1_f32m1x6(vfloat32m1x6_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m1x6_t test_vset_v_f32m1_f32m1x6(vfloat32m1x6_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m1x7_t test_vset_v_f32m1_f32m1x7(vfloat32m1x7_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m1x7_t test_vset_v_f32m1_f32m1x7(vfloat32m1x7_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m1x8_t test_vset_v_f32m1_f32m1x8(vfloat32m1x8_t dest, size_t index, vfloat32m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m1x8_t test_vset_v_f32m1_f32m1x8(vfloat32m1x8_t dest, size_t index, vfloat32m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m2x2_t test_vset_v_f32m2_f32m2x2(vfloat32m2x2_t dest, size_t index, vfloat32m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m2x2_t test_vset_v_f32m2_f32m2x2(vfloat32m2x2_t dest, size_t index, vfloat32m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m2x3_t test_vset_v_f32m2_f32m2x3(vfloat32m2x3_t dest, size_t index, vfloat32m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m2x3_t test_vset_v_f32m2_f32m2x3(vfloat32m2x3_t dest, size_t index, vfloat32m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m2x4_t test_vset_v_f32m2_f32m2x4(vfloat32m2x4_t dest, size_t index, vfloat32m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m2x4_t test_vset_v_f32m2_f32m2x4(vfloat32m2x4_t dest, size_t index, vfloat32m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat32m4x2_t test_vset_v_f32m4_f32m4x2(vfloat32m4x2_t dest, size_t index, vfloat32m4_t val) { - return __riscv_vset(dest, 0, val); +vfloat32m4x2_t test_vset_v_f32m4_f32m4x2(vfloat32m4x2_t dest, size_t index, vfloat32m4_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m1x2_t test_vset_v_f64m1_f64m1x2(vfloat64m1x2_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m1x2_t test_vset_v_f64m1_f64m1x2(vfloat64m1x2_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m1x3_t test_vset_v_f64m1_f64m1x3(vfloat64m1x3_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m1x3_t test_vset_v_f64m1_f64m1x3(vfloat64m1x3_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m1x4_t test_vset_v_f64m1_f64m1x4(vfloat64m1x4_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m1x4_t test_vset_v_f64m1_f64m1x4(vfloat64m1x4_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m1x5_t test_vset_v_f64m1_f64m1x5(vfloat64m1x5_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m1x5_t test_vset_v_f64m1_f64m1x5(vfloat64m1x5_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m1x6_t test_vset_v_f64m1_f64m1x6(vfloat64m1x6_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m1x6_t test_vset_v_f64m1_f64m1x6(vfloat64m1x6_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m1x7_t test_vset_v_f64m1_f64m1x7(vfloat64m1x7_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m1x7_t test_vset_v_f64m1_f64m1x7(vfloat64m1x7_t dest, size_t index, vfloat64m1_t value) { + 
return __riscv_vset(dest, 0, value); } -vfloat64m1x8_t test_vset_v_f64m1_f64m1x8(vfloat64m1x8_t dest, size_t index, vfloat64m1_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m1x8_t test_vset_v_f64m1_f64m1x8(vfloat64m1x8_t dest, size_t index, vfloat64m1_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m2x2_t test_vset_v_f64m2_f64m2x2(vfloat64m2x2_t dest, size_t index, vfloat64m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m2x2_t test_vset_v_f64m2_f64m2x2(vfloat64m2x2_t dest, size_t index, vfloat64m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m2x3_t test_vset_v_f64m2_f64m2x3(vfloat64m2x3_t dest, size_t index, vfloat64m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m2x3_t test_vset_v_f64m2_f64m2x3(vfloat64m2x3_t dest, size_t index, vfloat64m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m2x4_t test_vset_v_f64m2_f64m2x4(vfloat64m2x4_t dest, size_t index, vfloat64m2_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m2x4_t test_vset_v_f64m2_f64m2x4(vfloat64m2x4_t dest, size_t index, vfloat64m2_t value) { + return __riscv_vset(dest, 0, value); } -vfloat64m4x2_t test_vset_v_f64m4_f64m4x2(vfloat64m4x2_t dest, size_t index, vfloat64m4_t val) { - return __riscv_vset(dest, 0, val); +vfloat64m4x2_t test_vset_v_f64m4_f64m4x2(vfloat64m4x2_t dest, size_t index, vfloat64m4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf8x2_t test_vset_v_i8mf8_i8mf8x2(vint8mf8x2_t dest, size_t index, vint8mf8_t val) { - return __riscv_vset(dest, 0, val); +vint8mf8x2_t test_vset_v_i8mf8_i8mf8x2(vint8mf8x2_t dest, size_t index, vint8mf8_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf8x3_t test_vset_v_i8mf8_i8mf8x3(vint8mf8x3_t dest, size_t index, vint8mf8_t val) { - return __riscv_vset(dest, 0, val); +vint8mf8x3_t test_vset_v_i8mf8_i8mf8x3(vint8mf8x3_t dest, size_t index, vint8mf8_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf8x4_t test_vset_v_i8mf8_i8mf8x4(vint8mf8x4_t dest, size_t index, vint8mf8_t val) { - return __riscv_vset(dest, 0, val); +vint8mf8x4_t test_vset_v_i8mf8_i8mf8x4(vint8mf8x4_t dest, size_t index, vint8mf8_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf8x5_t test_vset_v_i8mf8_i8mf8x5(vint8mf8x5_t dest, size_t index, vint8mf8_t val) { - return __riscv_vset(dest, 0, val); +vint8mf8x5_t test_vset_v_i8mf8_i8mf8x5(vint8mf8x5_t dest, size_t index, vint8mf8_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf8x6_t test_vset_v_i8mf8_i8mf8x6(vint8mf8x6_t dest, size_t index, vint8mf8_t val) { - return __riscv_vset(dest, 0, val); +vint8mf8x6_t test_vset_v_i8mf8_i8mf8x6(vint8mf8x6_t dest, size_t index, vint8mf8_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf8x7_t test_vset_v_i8mf8_i8mf8x7(vint8mf8x7_t dest, size_t index, vint8mf8_t val) { - return __riscv_vset(dest, 0, val); +vint8mf8x7_t test_vset_v_i8mf8_i8mf8x7(vint8mf8x7_t dest, size_t index, vint8mf8_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf8x8_t test_vset_v_i8mf8_i8mf8x8(vint8mf8x8_t dest, size_t index, vint8mf8_t val) { - return __riscv_vset(dest, 0, val); +vint8mf8x8_t test_vset_v_i8mf8_i8mf8x8(vint8mf8x8_t dest, size_t index, vint8mf8_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf4x2_t test_vset_v_i8mf4_i8mf4x2(vint8mf4x2_t dest, size_t index, vint8mf4_t val) { - return __riscv_vset(dest, 0, val); +vint8mf4x2_t test_vset_v_i8mf4_i8mf4x2(vint8mf4x2_t dest, size_t index, vint8mf4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf4x3_t 
test_vset_v_i8mf4_i8mf4x3(vint8mf4x3_t dest, size_t index, vint8mf4_t val) { - return __riscv_vset(dest, 0, val); +vint8mf4x3_t test_vset_v_i8mf4_i8mf4x3(vint8mf4x3_t dest, size_t index, vint8mf4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf4x4_t test_vset_v_i8mf4_i8mf4x4(vint8mf4x4_t dest, size_t index, vint8mf4_t val) { - return __riscv_vset(dest, 0, val); +vint8mf4x4_t test_vset_v_i8mf4_i8mf4x4(vint8mf4x4_t dest, size_t index, vint8mf4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf4x5_t test_vset_v_i8mf4_i8mf4x5(vint8mf4x5_t dest, size_t index, vint8mf4_t val) { - return __riscv_vset(dest, 0, val); +vint8mf4x5_t test_vset_v_i8mf4_i8mf4x5(vint8mf4x5_t dest, size_t index, vint8mf4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf4x6_t test_vset_v_i8mf4_i8mf4x6(vint8mf4x6_t dest, size_t index, vint8mf4_t val) { - return __riscv_vset(dest, 0, val); +vint8mf4x6_t test_vset_v_i8mf4_i8mf4x6(vint8mf4x6_t dest, size_t index, vint8mf4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf4x7_t test_vset_v_i8mf4_i8mf4x7(vint8mf4x7_t dest, size_t index, vint8mf4_t val) { - return __riscv_vset(dest, 0, val); +vint8mf4x7_t test_vset_v_i8mf4_i8mf4x7(vint8mf4x7_t dest, size_t index, vint8mf4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf4x8_t test_vset_v_i8mf4_i8mf4x8(vint8mf4x8_t dest, size_t index, vint8mf4_t val) { - return __riscv_vset(dest, 0, val); +vint8mf4x8_t test_vset_v_i8mf4_i8mf4x8(vint8mf4x8_t dest, size_t index, vint8mf4_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf2x2_t test_vset_v_i8mf2_i8mf2x2(vint8mf2x2_t dest, size_t index, vint8mf2_t val) { - return __riscv_vset(dest, 0, val); +vint8mf2x2_t test_vset_v_i8mf2_i8mf2x2(vint8mf2x2_t dest, size_t index, vint8mf2_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf2x3_t test_vset_v_i8mf2_i8mf2x3(vint8mf2x3_t dest, size_t index, vint8mf2_t val) { - return __riscv_vset(dest, 0, val); +vint8mf2x3_t test_vset_v_i8mf2_i8mf2x3(vint8mf2x3_t dest, size_t index, vint8mf2_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf2x4_t test_vset_v_i8mf2_i8mf2x4(vint8mf2x4_t dest, size_t index, vint8mf2_t val) { - return __riscv_vset(dest, 0, val); +vint8mf2x4_t test_vset_v_i8mf2_i8mf2x4(vint8mf2x4_t dest, size_t index, vint8mf2_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf2x5_t test_vset_v_i8mf2_i8mf2x5(vint8mf2x5_t dest, size_t index, vint8mf2_t val) { - return __riscv_vset(dest, 0, val); +vint8mf2x5_t test_vset_v_i8mf2_i8mf2x5(vint8mf2x5_t dest, size_t index, vint8mf2_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf2x6_t test_vset_v_i8mf2_i8mf2x6(vint8mf2x6_t dest, size_t index, vint8mf2_t val) { - return __riscv_vset(dest, 0, val); +vint8mf2x6_t test_vset_v_i8mf2_i8mf2x6(vint8mf2x6_t dest, size_t index, vint8mf2_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf2x7_t test_vset_v_i8mf2_i8mf2x7(vint8mf2x7_t dest, size_t index, vint8mf2_t val) { - return __riscv_vset(dest, 0, val); +vint8mf2x7_t test_vset_v_i8mf2_i8mf2x7(vint8mf2x7_t dest, size_t index, vint8mf2_t value) { + return __riscv_vset(dest, 0, value); } -vint8mf2x8_t test_vset_v_i8mf2_i8mf2x8(vint8mf2x8_t dest, size_t index, vint8mf2_t val) { - return __riscv_vset(dest, 0, val); +vint8mf2x8_t test_vset_v_i8mf2_i8mf2x8(vint8mf2x8_t dest, size_t index, vint8mf2_t value) { + return __riscv_vset(dest, 0, value); } -vint8m1x2_t test_vset_v_i8m1_i8m1x2(vint8m1x2_t dest, size_t index, vint8m1_t val) { - return __riscv_vset(dest, 0, val); +vint8m1x2_t 
test_vset_v_i8m1_i8m1x2(vint8m1x2_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m1x3_t test_vset_v_i8m1_i8m1x3(vint8m1x3_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m1x3_t test_vset_v_i8m1_i8m1x3(vint8m1x3_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m1x4_t test_vset_v_i8m1_i8m1x4(vint8m1x4_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m1x4_t test_vset_v_i8m1_i8m1x4(vint8m1x4_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m1x5_t test_vset_v_i8m1_i8m1x5(vint8m1x5_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m1x5_t test_vset_v_i8m1_i8m1x5(vint8m1x5_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m1x6_t test_vset_v_i8m1_i8m1x6(vint8m1x6_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m1x6_t test_vset_v_i8m1_i8m1x6(vint8m1x6_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m1x7_t test_vset_v_i8m1_i8m1x7(vint8m1x7_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m1x7_t test_vset_v_i8m1_i8m1x7(vint8m1x7_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m1x8_t test_vset_v_i8m1_i8m1x8(vint8m1x8_t dest, size_t index, vint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m1x8_t test_vset_v_i8m1_i8m1x8(vint8m1x8_t dest, size_t index, vint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m2x2_t test_vset_v_i8m2_i8m2x2(vint8m2x2_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m2x2_t test_vset_v_i8m2_i8m2x2(vint8m2x2_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m2x3_t test_vset_v_i8m2_i8m2x3(vint8m2x3_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m2x3_t test_vset_v_i8m2_i8m2x3(vint8m2x3_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m2x4_t test_vset_v_i8m2_i8m2x4(vint8m2x4_t dest, size_t index, vint8m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m2x4_t test_vset_v_i8m2_i8m2x4(vint8m2x4_t dest, size_t index, vint8m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint8m4x2_t test_vset_v_i8m4_i8m4x2(vint8m4x2_t dest, size_t index, vint8m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint8m4x2_t test_vset_v_i8m4_i8m4x2(vint8m4x2_t dest, size_t index, vint8m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf4x2_t test_vset_v_i16mf4_i16mf4x2(vint16mf4x2_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf4x2_t test_vset_v_i16mf4_i16mf4x2(vint16mf4x2_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf4x3_t test_vset_v_i16mf4_i16mf4x3(vint16mf4x3_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf4x3_t test_vset_v_i16mf4_i16mf4x3(vint16mf4x3_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf4x4_t test_vset_v_i16mf4_i16mf4x4(vint16mf4x4_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf4x4_t test_vset_v_i16mf4_i16mf4x4(vint16mf4x4_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf4x5_t test_vset_v_i16mf4_i16mf4x5(vint16mf4x5_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf4x5_t test_vset_v_i16mf4_i16mf4x5(vint16mf4x5_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf4x6_t test_vset_v_i16mf4_i16mf4x6(vint16mf4x6_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf4x6_t test_vset_v_i16mf4_i16mf4x6(vint16mf4x6_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf4x7_t test_vset_v_i16mf4_i16mf4x7(vint16mf4x7_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf4x7_t test_vset_v_i16mf4_i16mf4x7(vint16mf4x7_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf4x8_t test_vset_v_i16mf4_i16mf4x8(vint16mf4x8_t dest, size_t index, vint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf4x8_t test_vset_v_i16mf4_i16mf4x8(vint16mf4x8_t dest, size_t index, vint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf2x2_t test_vset_v_i16mf2_i16mf2x2(vint16mf2x2_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf2x2_t test_vset_v_i16mf2_i16mf2x2(vint16mf2x2_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf2x3_t test_vset_v_i16mf2_i16mf2x3(vint16mf2x3_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf2x3_t test_vset_v_i16mf2_i16mf2x3(vint16mf2x3_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf2x4_t test_vset_v_i16mf2_i16mf2x4(vint16mf2x4_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf2x4_t test_vset_v_i16mf2_i16mf2x4(vint16mf2x4_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf2x5_t test_vset_v_i16mf2_i16mf2x5(vint16mf2x5_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf2x5_t test_vset_v_i16mf2_i16mf2x5(vint16mf2x5_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf2x6_t test_vset_v_i16mf2_i16mf2x6(vint16mf2x6_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf2x6_t test_vset_v_i16mf2_i16mf2x6(vint16mf2x6_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf2x7_t test_vset_v_i16mf2_i16mf2x7(vint16mf2x7_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf2x7_t test_vset_v_i16mf2_i16mf2x7(vint16mf2x7_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16mf2x8_t test_vset_v_i16mf2_i16mf2x8(vint16mf2x8_t dest, size_t index, vint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16mf2x8_t test_vset_v_i16mf2_i16mf2x8(vint16mf2x8_t dest, size_t index, vint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m1x2_t test_vset_v_i16m1_i16m1x2(vint16m1x2_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m1x2_t test_vset_v_i16m1_i16m1x2(vint16m1x2_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m1x3_t test_vset_v_i16m1_i16m1x3(vint16m1x3_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m1x3_t test_vset_v_i16m1_i16m1x3(vint16m1x3_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m1x4_t test_vset_v_i16m1_i16m1x4(vint16m1x4_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m1x4_t test_vset_v_i16m1_i16m1x4(vint16m1x4_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m1x5_t test_vset_v_i16m1_i16m1x5(vint16m1x5_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m1x5_t test_vset_v_i16m1_i16m1x5(vint16m1x5_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m1x6_t test_vset_v_i16m1_i16m1x6(vint16m1x6_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m1x6_t test_vset_v_i16m1_i16m1x6(vint16m1x6_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m1x7_t test_vset_v_i16m1_i16m1x7(vint16m1x7_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m1x7_t test_vset_v_i16m1_i16m1x7(vint16m1x7_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m1x8_t test_vset_v_i16m1_i16m1x8(vint16m1x8_t dest, size_t index, vint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m1x8_t test_vset_v_i16m1_i16m1x8(vint16m1x8_t dest, size_t index, vint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m2x2_t test_vset_v_i16m2_i16m2x2(vint16m2x2_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m2x2_t test_vset_v_i16m2_i16m2x2(vint16m2x2_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m2x3_t test_vset_v_i16m2_i16m2x3(vint16m2x3_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m2x3_t test_vset_v_i16m2_i16m2x3(vint16m2x3_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m2x4_t test_vset_v_i16m2_i16m2x4(vint16m2x4_t dest, size_t index, vint16m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m2x4_t test_vset_v_i16m2_i16m2x4(vint16m2x4_t dest, size_t index, vint16m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint16m4x2_t test_vset_v_i16m4_i16m4x2(vint16m4x2_t dest, size_t index, vint16m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint16m4x2_t test_vset_v_i16m4_i16m4x2(vint16m4x2_t dest, size_t index, vint16m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32mf2x2_t test_vset_v_i32mf2_i32mf2x2(vint32mf2x2_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32mf2x2_t test_vset_v_i32mf2_i32mf2x2(vint32mf2x2_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32mf2x3_t test_vset_v_i32mf2_i32mf2x3(vint32mf2x3_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32mf2x3_t test_vset_v_i32mf2_i32mf2x3(vint32mf2x3_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32mf2x4_t test_vset_v_i32mf2_i32mf2x4(vint32mf2x4_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32mf2x4_t test_vset_v_i32mf2_i32mf2x4(vint32mf2x4_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32mf2x5_t test_vset_v_i32mf2_i32mf2x5(vint32mf2x5_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32mf2x5_t test_vset_v_i32mf2_i32mf2x5(vint32mf2x5_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32mf2x6_t test_vset_v_i32mf2_i32mf2x6(vint32mf2x6_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32mf2x6_t test_vset_v_i32mf2_i32mf2x6(vint32mf2x6_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32mf2x7_t test_vset_v_i32mf2_i32mf2x7(vint32mf2x7_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32mf2x7_t test_vset_v_i32mf2_i32mf2x7(vint32mf2x7_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32mf2x8_t test_vset_v_i32mf2_i32mf2x8(vint32mf2x8_t dest, size_t index, vint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32mf2x8_t test_vset_v_i32mf2_i32mf2x8(vint32mf2x8_t dest, size_t index, vint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m1x2_t test_vset_v_i32m1_i32m1x2(vint32m1x2_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m1x2_t test_vset_v_i32m1_i32m1x2(vint32m1x2_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m1x3_t test_vset_v_i32m1_i32m1x3(vint32m1x3_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m1x3_t test_vset_v_i32m1_i32m1x3(vint32m1x3_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m1x4_t test_vset_v_i32m1_i32m1x4(vint32m1x4_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m1x4_t test_vset_v_i32m1_i32m1x4(vint32m1x4_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m1x5_t test_vset_v_i32m1_i32m1x5(vint32m1x5_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m1x5_t test_vset_v_i32m1_i32m1x5(vint32m1x5_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m1x6_t test_vset_v_i32m1_i32m1x6(vint32m1x6_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m1x6_t test_vset_v_i32m1_i32m1x6(vint32m1x6_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m1x7_t test_vset_v_i32m1_i32m1x7(vint32m1x7_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m1x7_t test_vset_v_i32m1_i32m1x7(vint32m1x7_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m1x8_t test_vset_v_i32m1_i32m1x8(vint32m1x8_t dest, size_t index, vint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m1x8_t test_vset_v_i32m1_i32m1x8(vint32m1x8_t dest, size_t index, vint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m2x2_t test_vset_v_i32m2_i32m2x2(vint32m2x2_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m2x2_t test_vset_v_i32m2_i32m2x2(vint32m2x2_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m2x3_t test_vset_v_i32m2_i32m2x3(vint32m2x3_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m2x3_t test_vset_v_i32m2_i32m2x3(vint32m2x3_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m2x4_t test_vset_v_i32m2_i32m2x4(vint32m2x4_t dest, size_t index, vint32m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m2x4_t test_vset_v_i32m2_i32m2x4(vint32m2x4_t dest, size_t index, vint32m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint32m4x2_t test_vset_v_i32m4_i32m4x2(vint32m4x2_t dest, size_t index, vint32m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint32m4x2_t test_vset_v_i32m4_i32m4x2(vint32m4x2_t dest, size_t index, vint32m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m1x2_t test_vset_v_i64m1_i64m1x2(vint64m1x2_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m1x2_t test_vset_v_i64m1_i64m1x2(vint64m1x2_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m1x3_t test_vset_v_i64m1_i64m1x3(vint64m1x3_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m1x3_t test_vset_v_i64m1_i64m1x3(vint64m1x3_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m1x4_t test_vset_v_i64m1_i64m1x4(vint64m1x4_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m1x4_t test_vset_v_i64m1_i64m1x4(vint64m1x4_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m1x5_t test_vset_v_i64m1_i64m1x5(vint64m1x5_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m1x5_t test_vset_v_i64m1_i64m1x5(vint64m1x5_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m1x6_t test_vset_v_i64m1_i64m1x6(vint64m1x6_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m1x6_t test_vset_v_i64m1_i64m1x6(vint64m1x6_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m1x7_t test_vset_v_i64m1_i64m1x7(vint64m1x7_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m1x7_t test_vset_v_i64m1_i64m1x7(vint64m1x7_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m1x8_t test_vset_v_i64m1_i64m1x8(vint64m1x8_t dest, size_t index, vint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m1x8_t test_vset_v_i64m1_i64m1x8(vint64m1x8_t dest, size_t index, vint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m2x2_t test_vset_v_i64m2_i64m2x2(vint64m2x2_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m2x2_t test_vset_v_i64m2_i64m2x2(vint64m2x2_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m2x3_t test_vset_v_i64m2_i64m2x3(vint64m2x3_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m2x3_t test_vset_v_i64m2_i64m2x3(vint64m2x3_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m2x4_t test_vset_v_i64m2_i64m2x4(vint64m2x4_t dest, size_t index, vint64m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m2x4_t test_vset_v_i64m2_i64m2x4(vint64m2x4_t dest, size_t index, vint64m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vint64m4x2_t test_vset_v_i64m4_i64m4x2(vint64m4x2_t dest, size_t index, vint64m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vint64m4x2_t test_vset_v_i64m4_i64m4x2(vint64m4x2_t dest, size_t index, vint64m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf8x2_t test_vset_v_u8mf8_u8mf8x2(vuint8mf8x2_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf8x2_t test_vset_v_u8mf8_u8mf8x2(vuint8mf8x2_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf8x3_t test_vset_v_u8mf8_u8mf8x3(vuint8mf8x3_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf8x3_t test_vset_v_u8mf8_u8mf8x3(vuint8mf8x3_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf8x4_t test_vset_v_u8mf8_u8mf8x4(vuint8mf8x4_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf8x4_t test_vset_v_u8mf8_u8mf8x4(vuint8mf8x4_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf8x5_t test_vset_v_u8mf8_u8mf8x5(vuint8mf8x5_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf8x5_t test_vset_v_u8mf8_u8mf8x5(vuint8mf8x5_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf8x6_t test_vset_v_u8mf8_u8mf8x6(vuint8mf8x6_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf8x6_t test_vset_v_u8mf8_u8mf8x6(vuint8mf8x6_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf8x7_t test_vset_v_u8mf8_u8mf8x7(vuint8mf8x7_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf8x7_t test_vset_v_u8mf8_u8mf8x7(vuint8mf8x7_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf8x8_t test_vset_v_u8mf8_u8mf8x8(vuint8mf8x8_t dest, size_t index, vuint8mf8_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf8x8_t test_vset_v_u8mf8_u8mf8x8(vuint8mf8x8_t dest, size_t index, vuint8mf8_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf4x2_t test_vset_v_u8mf4_u8mf4x2(vuint8mf4x2_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf4x2_t test_vset_v_u8mf4_u8mf4x2(vuint8mf4x2_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf4x3_t test_vset_v_u8mf4_u8mf4x3(vuint8mf4x3_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf4x3_t test_vset_v_u8mf4_u8mf4x3(vuint8mf4x3_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf4x4_t test_vset_v_u8mf4_u8mf4x4(vuint8mf4x4_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf4x4_t test_vset_v_u8mf4_u8mf4x4(vuint8mf4x4_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf4x5_t test_vset_v_u8mf4_u8mf4x5(vuint8mf4x5_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf4x5_t test_vset_v_u8mf4_u8mf4x5(vuint8mf4x5_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf4x6_t test_vset_v_u8mf4_u8mf4x6(vuint8mf4x6_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf4x6_t test_vset_v_u8mf4_u8mf4x6(vuint8mf4x6_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf4x7_t test_vset_v_u8mf4_u8mf4x7(vuint8mf4x7_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf4x7_t test_vset_v_u8mf4_u8mf4x7(vuint8mf4x7_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf4x8_t test_vset_v_u8mf4_u8mf4x8(vuint8mf4x8_t dest, size_t index, vuint8mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf4x8_t test_vset_v_u8mf4_u8mf4x8(vuint8mf4x8_t dest, size_t index, vuint8mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf2x2_t test_vset_v_u8mf2_u8mf2x2(vuint8mf2x2_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf2x2_t test_vset_v_u8mf2_u8mf2x2(vuint8mf2x2_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf2x3_t test_vset_v_u8mf2_u8mf2x3(vuint8mf2x3_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf2x3_t test_vset_v_u8mf2_u8mf2x3(vuint8mf2x3_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf2x4_t test_vset_v_u8mf2_u8mf2x4(vuint8mf2x4_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf2x4_t test_vset_v_u8mf2_u8mf2x4(vuint8mf2x4_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf2x5_t test_vset_v_u8mf2_u8mf2x5(vuint8mf2x5_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf2x5_t test_vset_v_u8mf2_u8mf2x5(vuint8mf2x5_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf2x6_t test_vset_v_u8mf2_u8mf2x6(vuint8mf2x6_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf2x6_t test_vset_v_u8mf2_u8mf2x6(vuint8mf2x6_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf2x7_t test_vset_v_u8mf2_u8mf2x7(vuint8mf2x7_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf2x7_t test_vset_v_u8mf2_u8mf2x7(vuint8mf2x7_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8mf2x8_t test_vset_v_u8mf2_u8mf2x8(vuint8mf2x8_t dest, size_t index, vuint8mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8mf2x8_t test_vset_v_u8mf2_u8mf2x8(vuint8mf2x8_t dest, size_t index, vuint8mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m1x2_t test_vset_v_u8m1_u8m1x2(vuint8m1x2_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m1x2_t test_vset_v_u8m1_u8m1x2(vuint8m1x2_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m1x3_t test_vset_v_u8m1_u8m1x3(vuint8m1x3_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m1x3_t test_vset_v_u8m1_u8m1x3(vuint8m1x3_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m1x4_t test_vset_v_u8m1_u8m1x4(vuint8m1x4_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m1x4_t test_vset_v_u8m1_u8m1x4(vuint8m1x4_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m1x5_t test_vset_v_u8m1_u8m1x5(vuint8m1x5_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m1x5_t test_vset_v_u8m1_u8m1x5(vuint8m1x5_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m1x6_t test_vset_v_u8m1_u8m1x6(vuint8m1x6_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m1x6_t test_vset_v_u8m1_u8m1x6(vuint8m1x6_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m1x7_t test_vset_v_u8m1_u8m1x7(vuint8m1x7_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m1x7_t test_vset_v_u8m1_u8m1x7(vuint8m1x7_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m1x8_t test_vset_v_u8m1_u8m1x8(vuint8m1x8_t dest, size_t index, vuint8m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m1x8_t test_vset_v_u8m1_u8m1x8(vuint8m1x8_t dest, size_t index, vuint8m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m2x2_t test_vset_v_u8m2_u8m2x2(vuint8m2x2_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m2x2_t test_vset_v_u8m2_u8m2x2(vuint8m2x2_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m2x3_t test_vset_v_u8m2_u8m2x3(vuint8m2x3_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m2x3_t test_vset_v_u8m2_u8m2x3(vuint8m2x3_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m2x4_t test_vset_v_u8m2_u8m2x4(vuint8m2x4_t dest, size_t index, vuint8m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m2x4_t test_vset_v_u8m2_u8m2x4(vuint8m2x4_t dest, size_t index, vuint8m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint8m4x2_t test_vset_v_u8m4_u8m4x2(vuint8m4x2_t dest, size_t index, vuint8m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint8m4x2_t test_vset_v_u8m4_u8m4x2(vuint8m4x2_t dest, size_t index, vuint8m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf4x2_t test_vset_v_u16mf4_u16mf4x2(vuint16mf4x2_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf4x2_t test_vset_v_u16mf4_u16mf4x2(vuint16mf4x2_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf4x3_t test_vset_v_u16mf4_u16mf4x3(vuint16mf4x3_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf4x3_t test_vset_v_u16mf4_u16mf4x3(vuint16mf4x3_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf4x4_t test_vset_v_u16mf4_u16mf4x4(vuint16mf4x4_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf4x4_t test_vset_v_u16mf4_u16mf4x4(vuint16mf4x4_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf4x5_t test_vset_v_u16mf4_u16mf4x5(vuint16mf4x5_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf4x5_t test_vset_v_u16mf4_u16mf4x5(vuint16mf4x5_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf4x6_t test_vset_v_u16mf4_u16mf4x6(vuint16mf4x6_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf4x6_t test_vset_v_u16mf4_u16mf4x6(vuint16mf4x6_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf4x7_t test_vset_v_u16mf4_u16mf4x7(vuint16mf4x7_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf4x7_t test_vset_v_u16mf4_u16mf4x7(vuint16mf4x7_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf4x8_t test_vset_v_u16mf4_u16mf4x8(vuint16mf4x8_t dest, size_t index, vuint16mf4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf4x8_t test_vset_v_u16mf4_u16mf4x8(vuint16mf4x8_t dest, size_t index, vuint16mf4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf2x2_t test_vset_v_u16mf2_u16mf2x2(vuint16mf2x2_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf2x2_t test_vset_v_u16mf2_u16mf2x2(vuint16mf2x2_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf2x3_t test_vset_v_u16mf2_u16mf2x3(vuint16mf2x3_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf2x3_t test_vset_v_u16mf2_u16mf2x3(vuint16mf2x3_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf2x4_t test_vset_v_u16mf2_u16mf2x4(vuint16mf2x4_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf2x4_t test_vset_v_u16mf2_u16mf2x4(vuint16mf2x4_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf2x5_t test_vset_v_u16mf2_u16mf2x5(vuint16mf2x5_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf2x5_t test_vset_v_u16mf2_u16mf2x5(vuint16mf2x5_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf2x6_t test_vset_v_u16mf2_u16mf2x6(vuint16mf2x6_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf2x6_t test_vset_v_u16mf2_u16mf2x6(vuint16mf2x6_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf2x7_t test_vset_v_u16mf2_u16mf2x7(vuint16mf2x7_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf2x7_t test_vset_v_u16mf2_u16mf2x7(vuint16mf2x7_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16mf2x8_t test_vset_v_u16mf2_u16mf2x8(vuint16mf2x8_t dest, size_t index, vuint16mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16mf2x8_t test_vset_v_u16mf2_u16mf2x8(vuint16mf2x8_t dest, size_t index, vuint16mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m1x2_t test_vset_v_u16m1_u16m1x2(vuint16m1x2_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m1x2_t test_vset_v_u16m1_u16m1x2(vuint16m1x2_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m1x3_t test_vset_v_u16m1_u16m1x3(vuint16m1x3_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m1x3_t test_vset_v_u16m1_u16m1x3(vuint16m1x3_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m1x4_t test_vset_v_u16m1_u16m1x4(vuint16m1x4_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m1x4_t test_vset_v_u16m1_u16m1x4(vuint16m1x4_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m1x5_t test_vset_v_u16m1_u16m1x5(vuint16m1x5_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m1x5_t test_vset_v_u16m1_u16m1x5(vuint16m1x5_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m1x6_t test_vset_v_u16m1_u16m1x6(vuint16m1x6_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m1x6_t test_vset_v_u16m1_u16m1x6(vuint16m1x6_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m1x7_t test_vset_v_u16m1_u16m1x7(vuint16m1x7_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m1x7_t test_vset_v_u16m1_u16m1x7(vuint16m1x7_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m1x8_t test_vset_v_u16m1_u16m1x8(vuint16m1x8_t dest, size_t index, vuint16m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m1x8_t test_vset_v_u16m1_u16m1x8(vuint16m1x8_t dest, size_t index, vuint16m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m2x2_t test_vset_v_u16m2_u16m2x2(vuint16m2x2_t dest, size_t index, vuint16m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m2x2_t test_vset_v_u16m2_u16m2x2(vuint16m2x2_t dest, size_t index, vuint16m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m2x3_t test_vset_v_u16m2_u16m2x3(vuint16m2x3_t dest, size_t index, vuint16m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m2x3_t test_vset_v_u16m2_u16m2x3(vuint16m2x3_t dest, size_t index, vuint16m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m2x4_t test_vset_v_u16m2_u16m2x4(vuint16m2x4_t dest, size_t index, vuint16m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m2x4_t test_vset_v_u16m2_u16m2x4(vuint16m2x4_t dest, size_t index, vuint16m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint16m4x2_t test_vset_v_u16m4_u16m4x2(vuint16m4x2_t dest, size_t index, vuint16m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint16m4x2_t test_vset_v_u16m4_u16m4x2(vuint16m4x2_t dest, size_t index, vuint16m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32mf2x2_t test_vset_v_u32mf2_u32mf2x2(vuint32mf2x2_t dest, size_t index, vuint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32mf2x2_t test_vset_v_u32mf2_u32mf2x2(vuint32mf2x2_t dest, size_t index, vuint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32mf2x3_t test_vset_v_u32mf2_u32mf2x3(vuint32mf2x3_t dest, size_t index, vuint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32mf2x3_t test_vset_v_u32mf2_u32mf2x3(vuint32mf2x3_t dest, size_t index, vuint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32mf2x4_t test_vset_v_u32mf2_u32mf2x4(vuint32mf2x4_t dest, size_t index, vuint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32mf2x4_t test_vset_v_u32mf2_u32mf2x4(vuint32mf2x4_t dest, size_t index, vuint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32mf2x5_t test_vset_v_u32mf2_u32mf2x5(vuint32mf2x5_t dest, size_t index, vuint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32mf2x5_t test_vset_v_u32mf2_u32mf2x5(vuint32mf2x5_t dest, size_t index, vuint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32mf2x6_t test_vset_v_u32mf2_u32mf2x6(vuint32mf2x6_t dest, size_t index, vuint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32mf2x6_t test_vset_v_u32mf2_u32mf2x6(vuint32mf2x6_t dest, size_t index, vuint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32mf2x7_t test_vset_v_u32mf2_u32mf2x7(vuint32mf2x7_t dest, size_t index, vuint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32mf2x7_t test_vset_v_u32mf2_u32mf2x7(vuint32mf2x7_t dest, size_t index, vuint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32mf2x8_t test_vset_v_u32mf2_u32mf2x8(vuint32mf2x8_t dest, size_t index, vuint32mf2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32mf2x8_t test_vset_v_u32mf2_u32mf2x8(vuint32mf2x8_t dest, size_t index, vuint32mf2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m1x2_t test_vset_v_u32m1_u32m1x2(vuint32m1x2_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m1x2_t test_vset_v_u32m1_u32m1x2(vuint32m1x2_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m1x3_t test_vset_v_u32m1_u32m1x3(vuint32m1x3_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m1x3_t test_vset_v_u32m1_u32m1x3(vuint32m1x3_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m1x4_t test_vset_v_u32m1_u32m1x4(vuint32m1x4_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m1x4_t test_vset_v_u32m1_u32m1x4(vuint32m1x4_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m1x5_t test_vset_v_u32m1_u32m1x5(vuint32m1x5_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m1x5_t test_vset_v_u32m1_u32m1x5(vuint32m1x5_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m1x6_t test_vset_v_u32m1_u32m1x6(vuint32m1x6_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m1x6_t test_vset_v_u32m1_u32m1x6(vuint32m1x6_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m1x7_t test_vset_v_u32m1_u32m1x7(vuint32m1x7_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m1x7_t test_vset_v_u32m1_u32m1x7(vuint32m1x7_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m1x8_t test_vset_v_u32m1_u32m1x8(vuint32m1x8_t dest, size_t index, vuint32m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m1x8_t test_vset_v_u32m1_u32m1x8(vuint32m1x8_t dest, size_t index, vuint32m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m2x2_t test_vset_v_u32m2_u32m2x2(vuint32m2x2_t dest, size_t index, vuint32m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m2x2_t test_vset_v_u32m2_u32m2x2(vuint32m2x2_t dest, size_t index, vuint32m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m2x3_t test_vset_v_u32m2_u32m2x3(vuint32m2x3_t dest, size_t index, vuint32m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m2x3_t test_vset_v_u32m2_u32m2x3(vuint32m2x3_t dest, size_t index, vuint32m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m2x4_t test_vset_v_u32m2_u32m2x4(vuint32m2x4_t dest, size_t index, vuint32m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m2x4_t test_vset_v_u32m2_u32m2x4(vuint32m2x4_t dest, size_t index, vuint32m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint32m4x2_t test_vset_v_u32m4_u32m4x2(vuint32m4x2_t dest, size_t index, vuint32m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint32m4x2_t test_vset_v_u32m4_u32m4x2(vuint32m4x2_t dest, size_t index, vuint32m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m1x2_t test_vset_v_u64m1_u64m1x2(vuint64m1x2_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m1x2_t test_vset_v_u64m1_u64m1x2(vuint64m1x2_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m1x3_t test_vset_v_u64m1_u64m1x3(vuint64m1x3_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m1x3_t test_vset_v_u64m1_u64m1x3(vuint64m1x3_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m1x4_t test_vset_v_u64m1_u64m1x4(vuint64m1x4_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m1x4_t test_vset_v_u64m1_u64m1x4(vuint64m1x4_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m1x5_t test_vset_v_u64m1_u64m1x5(vuint64m1x5_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m1x5_t test_vset_v_u64m1_u64m1x5(vuint64m1x5_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m1x6_t test_vset_v_u64m1_u64m1x6(vuint64m1x6_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m1x6_t test_vset_v_u64m1_u64m1x6(vuint64m1x6_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m1x7_t test_vset_v_u64m1_u64m1x7(vuint64m1x7_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m1x7_t test_vset_v_u64m1_u64m1x7(vuint64m1x7_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m1x8_t test_vset_v_u64m1_u64m1x8(vuint64m1x8_t dest, size_t index, vuint64m1_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m1x8_t test_vset_v_u64m1_u64m1x8(vuint64m1x8_t dest, size_t index, vuint64m1_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m2x2_t test_vset_v_u64m2_u64m2x2(vuint64m2x2_t dest, size_t index, vuint64m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m2x2_t test_vset_v_u64m2_u64m2x2(vuint64m2x2_t dest, size_t index, vuint64m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m2x3_t test_vset_v_u64m2_u64m2x3(vuint64m2x3_t dest, size_t index, vuint64m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m2x3_t test_vset_v_u64m2_u64m2x3(vuint64m2x3_t dest, size_t index, vuint64m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m2x4_t test_vset_v_u64m2_u64m2x4(vuint64m2x4_t dest, size_t index, vuint64m2_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m2x4_t test_vset_v_u64m2_u64m2x4(vuint64m2x4_t dest, size_t index, vuint64m2_t value) {
+  return __riscv_vset(dest, 0, value);
 }
-vuint64m4x2_t test_vset_v_u64m4_u64m4x2(vuint64m4x2_t dest, size_t index, vuint64m4_t val) {
-  return __riscv_vset(dest, 0, val);
+vuint64m4x2_t test_vset_v_u64m4_u64m4x2(vuint64m4x2_t dest, size_t index, vuint64m4_t value) {
+  return __riscv_vset(dest, 0, value);
 }
 /* { dg-final { scan-assembler-times {vl[1248]re[0-9]*\.v\s+v[1248],0\([a-z0-9]*\)\s+vl[1248]re[0-9]*\.v\s+v[1248],0\([a-z0-9]*\)+[ivxfswum.]*\s+} 66 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsext_vf2.c b/auto-generated/gnu-overloaded-tests/vsext_vf2.c
index cf17c7c31..d3d974fd5 100644
--- a/auto-generated/gnu-overloaded-tests/vsext_vf2.c
+++ b/auto-generated/gnu-overloaded-tests/vsext_vf2.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint16m2_t test_vsext_vf2_i16m2(vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint16m4_t test_vsext_vf2_i16m4(vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint16m8_t test_vsext_vf2_i16m8(vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint32m2_t test_vsext_vf2_i32m2(vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint32m4_t test_vsext_vf2_i32m4(vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint32m8_t test_vsext_vf2_i32m8(vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint64m2_t test_vsext_vf2_i64m2(vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint64m4_t test_vsext_vf2_i64m4(vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(op1, vl);
+vint64m8_t test_vsext_vf2_i64m8(vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vs2, vl);
 }
-vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t vm, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t vm, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t vm, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t vm, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
-vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2(mask, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2(vm, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf2[ivxfswum.]*\s+} 30 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsext_vf4.c b/auto-generated/gnu-overloaded-tests/vsext_vf4.c
index 9753728dc..055a46edd 100644
--- a/auto-generated/gnu-overloaded-tests/vsext_vf4.c
+++ b/auto-generated/gnu-overloaded-tests/vsext_vf4.c
@@ -6,76 +6,76 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint32m4_t test_vsext_vf4_i32m4(vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint32m8_t test_vsext_vf4_i32m8(vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint64m4_t test_vsext_vf4_i64m4(vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(op1, vl);
+vint64m8_t test_vsext_vf4_i64m8(vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vs2, vl);
 }
-vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t vm, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t vm, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
-vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4(mask, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4(vm, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf4[ivxfswum.]*\s+} 18 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsext_vf8.c b/auto-generated/gnu-overloaded-tests/vsext_vf8.c
index 31095c968..f6fbbb246 100644
--- a/auto-generated/gnu-overloaded-tests/vsext_vf8.c
+++ b/auto-generated/gnu-overloaded-tests/vsext_vf8.c
@@ -6,36 +6,36 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8(op1, vl);
+vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vs2, vl);
 }
-vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8(op1, vl);
+vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vs2, vl);
 }
-vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8(op1, vl);
+vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vs2, vl);
 }
-vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8(op1, vl);
+vint64m8_t test_vsext_vf8_i64m8(vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vs2, vl);
 }
-vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8(mask, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vm, vs2, vl);
 }
-vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8(mask, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vm, vs2, vl);
 }
-vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8(mask, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vm, vs2, vl);
 }
-vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8(mask, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t vm, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8(vm, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf8[ivxfswum.]*\s+} 8 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vslide1down.c b/auto-generated/gnu-overloaded-tests/vslide1down.c
index de2b03e15..501cd9a6c 100644
--- a/auto-generated/gnu-overloaded-tests/vslide1down.c
+++ b/auto-generated/gnu-overloaded-tests/vslide1down.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down(src, value, vl);
+vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vs2, rs1, vl);
 }
-vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
 }
-vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down(mask, src, value, vl);
+vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down(vm, vs2, rs1, vl);
} -vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint64m1_t 
test_vslide1down_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down(mask, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1down\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vslide1up.c b/auto-generated/gnu-overloaded-tests/vslide1up.c index ff9d0f8ae..96c0c14f2 100644 --- a/auto-generated/gnu-overloaded-tests/vslide1up.c +++ b/auto-generated/gnu-overloaded-tests/vslide1up.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t 
value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint8mf8_t 
test_vslide1up_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t vs2, 
uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint8m8_t 
test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, 
size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); 
+vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); 
+vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up(mask, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1up\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vslidedown.c b/auto-generated/gnu-overloaded-tests/vslidedown.c index 56790b8ee..6ee52ec9f 100644 --- a/auto-generated/gnu-overloaded-tests/vslidedown.c +++ b/auto-generated/gnu-overloaded-tests/vslidedown.c @@ -6,476 +6,476 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } 
-vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint16m1_t 
test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, 
size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, 
size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } 
-vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } 
-vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint8m4_t 
test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, 
vl); } -vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown(mask, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslidedown\.[ivxfswum.]+\s+} 118 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vslideup.c b/auto-generated/gnu-overloaded-tests/vslideup.c index 46ac8f110..a6151adb4 100644 --- a/auto-generated/gnu-overloaded-tests/vslideup.c +++ b/auto-generated/gnu-overloaded-tests/vslideup.c @@ -6,476 +6,476 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat16mf4_t test_vslideup_vx_f16mf4(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat16mf2_t test_vslideup_vx_f16mf2(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat16m1_t test_vslideup_vx_f16m1(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat16m2_t test_vslideup_vx_f16m2(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat16m4_t test_vslideup_vx_f16m4(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t dest, 
vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat16m8_t test_vslideup_vx_f16m8(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t 
offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1(vint32m1_t vd, vint32m1_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, 
size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, 
src, offset, vl); +vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(dest, src, offset, vl); +vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat16mf4_t test_vslideup_vx_f16mf4_m(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat16mf2_t test_vslideup_vx_f16mf2_m(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat16m1_t test_vslideup_vx_f16m1_m(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat16m2_t test_vslideup_vx_f16m2_m(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat16m4_t test_vslideup_vx_f16m4_m(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat16m8_t test_vslideup_vx_f16m8_m(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); 
+vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint8mf2_t 
test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, 
rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, 
offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup(mask, dest, src, offset, vl); +vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslideup\.[ivxfswum.]+\s+} 118 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsll.c b/auto-generated/gnu-overloaded-tests/vsll.c index 80bbc254e..5cdb970b1 100644 --- a/auto-generated/gnu-overloaded-tests/vsll.c +++ 
b/auto-generated/gnu-overloaded-tests/vsll.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint16mf4_t 
test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return 
__riscv_vsll(op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vsll(vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t 
shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return 
__riscv_vsll(op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll(vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(op1, shift, vl); +vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint8mf4_t 
test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t 
shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, 
size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, 
size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { 
- return __riscv_vsll(mask, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, 
vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll(vm, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll(mask, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll(vm, vs2, vs1, 
vl);
 }

-vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll(mask, op1, shift, vl);
+vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll(mask, op1, shift, vl);
+vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll(mask, op1, shift, vl);
+vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll(mask, op1, shift, vl);
+vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll(mask, op1, shift, vl);
+vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll(mask, op1, shift, vl);
+vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll(mask, op1, shift, vl);
+vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsll\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsm.c b/auto-generated/gnu-overloaded-tests/vsm.c
index 7d5ea4bf9..4f235f28a 100644
--- a/auto-generated/gnu-overloaded-tests/vsm.c
+++ b/auto-generated/gnu-overloaded-tests/vsm.c
@@ -6,32 +6,32 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
-  return __riscv_vsm(base, value, vl);
+void test_vsm_v_b1(uint8_t *rs1, vbool1_t vs3, size_t vl) {
+  return __riscv_vsm(rs1, vs3, vl);
 }

-void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
-  return __riscv_vsm(base, value, vl);
+void test_vsm_v_b2(uint8_t *rs1, vbool2_t vs3, size_t vl) {
+  return __riscv_vsm(rs1, vs3, vl);
 }

-void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
-  return __riscv_vsm(base, value, vl);
+void test_vsm_v_b4(uint8_t *rs1, vbool4_t vs3, size_t vl) {
+  return __riscv_vsm(rs1, vs3, vl);
 }

-void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
-  return __riscv_vsm(base, value, vl);
+void test_vsm_v_b8(uint8_t *rs1, vbool8_t vs3, size_t vl) {
+  return __riscv_vsm(rs1, vs3, vl);
 }

-void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
-  return __riscv_vsm(base, value, vl);
+void test_vsm_v_b16(uint8_t *rs1, vbool16_t vs3, size_t vl) {
+  return __riscv_vsm(rs1, vs3, vl);
 }

-void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
-  return __riscv_vsm(base, value, vl);
+void test_vsm_v_b32(uint8_t *rs1, vbool32_t vs3, size_t vl) {
+  return __riscv_vsm(rs1, vs3, vl);
 }

-void test_vsm_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
-  return __riscv_vsm(base, value, vl);
+void test_vsm_v_b64(uint8_t *rs1, vbool64_t vs3, size_t vl) {
+  return __riscv_vsm(rs1, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsm\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsmul.c b/auto-generated/gnu-overloaded-tests/vsmul.c
index 62753feb1..356a1ebdb 100644
--- a/auto-generated/gnu-overloaded-tests/vsmul.c
+++ b/auto-generated/gnu-overloaded-tests/vsmul.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vsmul_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vsmul_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vsmul_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vsmul_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t
test_vsmul_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t 
vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t 
rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, 
op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul(vm, 
vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul(mask, op1, 
op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsmul(vm, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul(vm, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsmul\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxei16.c b/auto-generated/gnu-overloaded-tests/vsoxei16.c
index 34da3d428..52e5aaf1d 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxei16.c
@@ -6,460 +6,460 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxei16_v_f16mf4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei16(base, bindex, value, vl);
+void test_vsoxei16_v_f16mf4(float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei16(rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_f16mf2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei16(base, bindex, value, vl);
+void test_vsoxei16_v_f16mf2(float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei16(rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_f16m1(float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei16(base, bindex, value, vl);
+void test_vsoxei16_v_f16m1(float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei16(rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_f16m2(float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei16(base, bindex, value, vl);
+void test_vsoxei16_v_f16m2(float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei16(rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_f16m4(float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsoxei16(base, bindex, value, vl);
+void test_vsoxei16_v_f16m4(float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei16(rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_f16m8(float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsoxei16(base, bindex, value, vl);
+void test_vsoxei16_v_f16m8(float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei16(rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_f32mf2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei16(base, bindex, value, vl);
+void test_vsoxei16_v_f32mf2(float32_t *rs1,
vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m1(float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f32m1(float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m2(float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f32m2(float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m4(float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f32m4(float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m8(float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f32m8(float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m1(float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f64m1(float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m2(float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f64m2(float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m4(float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f64m4(float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m8(float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_f64m8(float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i8mf8(int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i8mf4(int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i8mf2(int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i8m1(int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void 
test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i8m2(int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i8m4(int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i16mf4(int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i16mf2(int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i16m1(int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i16m2(int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i16m4(int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i16m8(int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i32mf2(int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i32m1(int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i32m2(int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i32m4(int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void 
test_vsoxei16_v_i32m8(int32_t *rs1, vuint16m4_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i64m1(int64_t *rs1, vuint16mf4_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i64m2(int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i64m4(int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_i64m8(int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u8mf8(uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u8mf4(uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u8mf2(uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u8m1(uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u8m2(uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u8m4(uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u16mf4(uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u16mf2(uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void 
test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u16m1(uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u16m2(uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u16m4(uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u16m8(uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u32mf2(uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u32m1(uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u32m2(uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u32m4(uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u32m8(uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u64m1(uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u64m2(uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u64m4(uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return 
__riscv_vsoxei16(base, bindex, value, vl); +void test_vsoxei16_v_u64m8(uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m1_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m4_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f16m8_m(vbool2_t mask, float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m1_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f32m4_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void 
test_vsoxei16_v_f32m8_m(vbool4_t mask, float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m1_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m4_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_f64m8_m(vbool8_t mask, float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return 
__riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint16m4_t 
rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void 
test_vsoxei16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsoxei16(mask, base, bindex, value, vl); +void test_vsoxei16_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); } -void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t 
value, size_t vl) {
- return __riscv_vsoxei16(mask, base, bindex, value, vl);
+void test_vsoxei16_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) {
+ return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
- return __riscv_vsoxei16(mask, base, bindex, value, vl);
+void test_vsoxei16_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) {
+ return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
- return __riscv_vsoxei16(mask, base, bindex, value, vl);
+void test_vsoxei16_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) {
+ return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl);
 }

-void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
- return __riscv_vsoxei16(mask, base, bindex, value, vl);
+void test_vsoxei16_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) {
+ return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei16\.[ivxfswum.]+\s+} 114 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxei32.c b/auto-generated/gnu-overloaded-tests/vsoxei32.c
index 31263c425..ae60459af 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxei32_v_f16mf4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
- return __riscv_vsoxei32(base, bindex, value, vl);
+void test_vsoxei32_v_f16mf4(float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_f16mf2(float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
- return __riscv_vsoxei32(base, bindex, value, vl);
+void test_vsoxei32_v_f16mf2(float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_f16m1(float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
- return __riscv_vsoxei32(base, bindex, value, vl);
+void test_vsoxei32_v_f16m1(float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_f16m2(float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
- return __riscv_vsoxei32(base, bindex, value, vl);
+void test_vsoxei32_v_f16m2(float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_f16m4(float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
- return __riscv_vsoxei32(base, bindex, value, vl);
+void test_vsoxei32_v_f16m4(float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_f32mf2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
- return __riscv_vsoxei32(base, bindex, value, vl);
+void test_vsoxei32_v_f32mf2(float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
 }

-void test_vsoxei32_v_f32m1(float32_t *base, vuint32m1_t bindex,
vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f32m1(float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m2(float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f32m2(float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m4(float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f32m4(float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m8(float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f32m8(float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m1(float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f64m1(float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m2(float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f64m2(float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m4(float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f64m4(float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m8(float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_f64m8(float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i8mf8(int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i8mf4(int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i8mf2(int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i8m1(int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i8m2(int8_t 
*rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i16mf4(int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i16mf2(int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i16m1(int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i16m2(int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i16m4(int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i32mf2(int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i32m1(int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i32m2(int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i32m4(int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i32m8(int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i64m1(int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i64m2(int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m4(int64_t *base, 
vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i64m4(int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_i64m8(int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u8mf8(uint8_t *rs1, vuint32mf2_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u8mf4(uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u8mf2(uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u8m1(uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u8m2(uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u16mf4(uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u16mf2(uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u16m1(uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u16m2(uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u16m4(uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void 
test_vsoxei32_v_u32mf2(uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u32m1(uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u32m2(uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u32m4(uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u32m8(uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u64m1(uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u64m2(uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u64m4(uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsoxei32(base, bindex, value, vl); +void test_vsoxei32_v_u64m8(uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsoxei32(rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m1_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - 
return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f16m4_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m1_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m4_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f32m8_m(vbool4_t mask, float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m1_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m4_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } -void test_vsoxei32_v_f64m8_m(vbool8_t mask, float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei32(mask, base, bindex, value, vl); +void test_vsoxei32_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl); } 
-void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei32(mask, base, bindex, value, vl);
+void test_vsoxei32_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxei64.c b/auto-generated/gnu-overloaded-tests/vsoxei64.c
index ab4af6e4f..cfa71f23e 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxei64.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxei64.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxei64_v_f16mf4(float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f16mf4(float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f16mf2(float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f16mf2(float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f16m1(float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f16m1(float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f16m2(float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f16m2(float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32mf2(float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f32mf2(float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32m1(float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f32m1(float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32m2(float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f32m2(float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32m4(float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f32m4(float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m1(float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f64m1(float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m2(float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f64m2(float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m4(float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f64m4(float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m8(float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_f64m8(float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i8mf8(int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i8mf4(int8_t *rs1, vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i8mf2(int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i8m1(int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i16mf4(int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i16mf2(int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i16m1(int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i16m2(int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i32mf2(int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i32m1(int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i32m2(int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i32m4(int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i64m1(int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i64m2(int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i64m4(int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_i64m8(int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u8mf8(uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u8mf4(uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u8mf2(uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u8m1(uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u16mf4(uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u16mf2(uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u16m1(uint16_t *rs1, vuint64m4_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u16m2(uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u32mf2(uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u32m1(uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u32m2(uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u32m4(uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u64m1(uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u64m2(uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u64m4(uint64_t *rs1, vuint64m4_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64(base, bindex, value, vl);
+void test_vsoxei64_v_u64m8(uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f16m1_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f16m2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32m1_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32m2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f32m4_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m1_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m2_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m4_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_f64m8_m(vbool8_t mask, float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei64(mask, base, bindex, value, vl);
+void test_vsoxei64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei64\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxei8.c b/auto-generated/gnu-overloaded-tests/vsoxei8.c
index 167662c4e..7d1fc009a 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxei8.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxei8.c
@@ -6,476 +6,476 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxei8_v_f16mf4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f16mf4(float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f16mf2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f16mf2(float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f16m1(float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f16m1(float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f16m2(float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f16m2(float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f16m4(float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f16m4(float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f16m8(float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f16m8(float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f32mf2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f32mf2(float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f32m1(float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f32m1(float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f32m2(float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f32m2(float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f32m4(float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f32m4(float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f32m8(float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f32m8(float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f64m1(float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f64m1(float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f64m2(float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f64m2(float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f64m4(float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f64m4(float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_f64m8(float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_f64m8(float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i8mf8(int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i8mf4(int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i8mf2(int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i8m1(int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i8m2(int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i8m4(int8_t *rs1, vuint8m4_t rs2, vint8m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i8m8(int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i16mf4(int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i16mf2(int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i16m1(int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i16m2(int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i16m4(int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i16m8(int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i32mf2(int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i32m1(int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i32m2(int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i32m4(int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i32m8(int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i64m1(int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i64m2(int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i64m4(int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_i64m8(int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u8mf8(uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u8mf4(uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u8mf2(uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u8m1(uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u8m2(uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u8m4(uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u8m8(uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u16mf4(uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u16mf2(uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u16m1(uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u16m2(uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u16m4(uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u16m8(uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u32mf2(uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u32m1(uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u32m2(uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u32m4(uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u32m8(uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u64m1(uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u64m2(uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei8(base, bindex, value, vl);
+void test_vsoxei8_v_u64m4(uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(rs1, rs2, vs3, vl);
 }
 
test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsoxei8(base, bindex, value, vl); +void test_vsoxei8_v_u64m8(uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsoxei8(rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f16m1_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f16m2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f16m4_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f16m8_m(vbool2_t mask, float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f32m1_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f32m2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f32m4_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, 
rs2, vs3, vl); } -void test_vsoxei8_v_f32m8_m(vbool4_t mask, float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f64m1_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f64m2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f64m4_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_f64m8_m(vbool8_t mask, float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint8m4_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } 
-void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i8m8_m(vbool1_t vm, int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsoxei8(mask, base, bindex, value, vl); +void test_vsoxei8_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl); } -void 
-void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
-void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsoxei8(mask, base, bindex, value, vl);
+void test_vsoxei8_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxei8\.[ivxfswum.]+\s+} 118 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg2ei16.c b/auto-generated/gnu-overloaded-tests/vsoxseg2ei16.c
index 2bf19a7b9..9548a0950 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg2ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg2ei16.c
@@ -6,388 +6,388 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg2ei16_v_f16mf4x2(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16mf4x2(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16mf2x2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16mf2x2(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16m1x2(float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16m1x2(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16m2x2(float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16m2x2(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16m4x2(float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16m4x2(float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32mf2x2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32mf2x2(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32m1x2(float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32m1x2(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32m2x2(float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32m2x2(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32m4x2(float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32m4x2(float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f64m1x2(float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f64m1x2(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f64m2x2(float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f64m2x2(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f64m4x2(float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f64m4x2(float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8mf8x2(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8mf4x2(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8mf2x2(int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8m1x2(int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8m2x2(int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8m4x2(int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16mf4x2(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16mf2x2(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16m1x2(int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16m2x2(int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16m4x2(int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32mf2x2(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32m1x2(int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32m2x2(int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32m4x2(int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i64m1x2(int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i64m2x2(int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i64m4x2(int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8m1x2(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8m2x2(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8m4x2(uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16m1x2(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16m2x2(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16m4x2(uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32m1x2(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32m2x2(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32m4x2(uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u64m1x2(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u64m2x2(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u64m4x2(uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei16\.[ivxfswum.]+\s+} 96 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg2ei32.c b/auto-generated/gnu-overloaded-tests/vsoxseg2ei32.c
index b543de9a2..845a33b4c 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg2ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg2ei32.c
@@ -6,372 +6,372 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-void test_vsoxseg2ei32_v_f16mf4x2(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16mf4x2(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16mf2x2(float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16mf2x2(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16m1x2(float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16m1x2(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16m2x2(float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16m2x2(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16m4x2(float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16m4x2(float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32mf2x2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32mf2x2(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32m1x2(float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32m1x2(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32m2x2(float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32m2x2(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32m4x2(float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32m4x2(float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f64m1x2(float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f64m1x2(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f64m2x2(float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f64m2x2(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f64m4x2(float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f64m4x2(float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i8mf8x2(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i8mf4x2(int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i8mf2x2(int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i8m1x2(int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i8m2x2(int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i16mf4x2(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i16mf2x2(int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i16m1x2(int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i16m2x2(int16_t *rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i16m4x2(int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i32mf2x2(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i32m1x2(int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i32m2x2(int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i32m4x2(int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i64m1x2(int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i64m2x2(int64_t *rs1, vuint32m1_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_i64m4x2(int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u8m1x2(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u8m2x2(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u16m1x2(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u16m2x2(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u16m4x2(uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u32m1x2(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u32m2x2(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u32m4x2(uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u64m1x2(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u64m2x2(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_u64m4x2(uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 
-void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl);
vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t vm, int16_t 
*rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, 
vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t 
vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei32\.[ivxfswum.]+\s+} 92 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg2ei64.c b/auto-generated/gnu-overloaded-tests/vsoxseg2ei64.c index 257d717fc..6d18c599c 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg2ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg2ei64.c @@ -6,332 +6,332 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg2ei64_v_f16mf4x2(float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16mf4x2(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16mf2x2(float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16mf2x2(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16m1x2(float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16m1x2(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16m2x2(float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, 
v_tuple, vl); +void test_vsoxseg2ei64_v_f16m2x2(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32mf2x2(float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32mf2x2(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m1x2(float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m1x2(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m2x2(float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m2x2(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m4x2(float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m4x2(float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m1x2(float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m1x2(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m2x2(float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m2x2(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m4x2(float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m4x2(float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf8x2(int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf4x2(int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf2x2(int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8m1x2(int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t 
vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf4x2(int16_t *rs1, vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf2x2(int16_t *rs1, vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16m1x2(int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16m2x2(int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32mf2x2(int32_t *rs1, vuint64m1_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m1x2(int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m2x2(int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m4x2(int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m1x2(int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m2x2(int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m4x2(int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t 
bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8m1x2(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m1x2(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m2x2(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m1x2(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m2x2(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, 
vl); +void test_vsoxseg2ei64_v_u32m4x2(uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m1x2(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m2x2(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m4x2(uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, 
vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, 
vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t 
vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t vm, 
uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei64\.[ivxfswum.]+\s+} 82 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg2ei8.c b/auto-generated/gnu-overloaded-tests/vsoxseg2ei8.c index eae696d53..add80fede 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg2ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg2ei8.c @@ -6,388 +6,388 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg2ei8_v_f16mf4x2(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16mf4x2(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16mf2x2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16mf2x2(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m1x2(float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m1x2(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m2x2(float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m2x2(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m4x2(float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m4x2(float16_t *rs1, vuint8m2_t 
vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32mf2x2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32mf2x2(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m1x2(float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m1x2(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m2x2(float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m2x2(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m4x2(float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m4x2(float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m1x2(float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m1x2(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m2x2(float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m2x2(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m4x2(float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m4x2(float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf8x2(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf4x2(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf2x2(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m1x2(int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, 
vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m2x2(int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m4x2(int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf4x2(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf2x2(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m1x2(int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m2x2(int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m4x2(int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32mf2x2(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m1x2(int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m2x2(int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m4x2(int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m1x2(int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, 
size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m2x2(int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m4x2(int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m1x2(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m2x2(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m4x2(uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m1x2(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - 
return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m2x2(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m4x2(uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m1x2(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m2x2(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m4x2(uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m1x2(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m2x2(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m4x2(uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint8m2_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t 
vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void 
test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t vm, uint8_t 
*rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t 
vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg2ei8\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg3ei16.c b/auto-generated/gnu-overloaded-tests/vsoxseg3ei16.c index bc92e3fc0..dd420fe97 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg3ei16.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg3ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg3ei16_v_f16mf4x3(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16mf4x3(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16mf2x3(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16mf2x3(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m1x3(float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16m1x3(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m2x3(float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16m2x3(float16_t *rs1, vuint16m2_t 
vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32mf2x3(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32mf2x3(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m1x3(float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m1x3(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m2x3(float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m2x3(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f64m1x3(float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m1x3(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f64m2x3(float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m2x3(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf8x3(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf4x3(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf2x3(int8_t *rs1, vuint16m1_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m1x3(int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m2x3(int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf4x3(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void 
test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf2x3(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m1x3(int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m2x3(int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32mf2x3(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m1x3(int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m2x3(int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m1x3(int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m2x3(int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m1x3(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m2x3(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m1x3(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m2x3(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m1x3(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m2x3(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m1x3(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m2x3(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void 
test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) 
{ - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t 
vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg3ei32.c b/auto-generated/gnu-overloaded-tests/vsoxseg3ei32.c index 809785cd7..cf74b6ca8 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg3ei32.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg3ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg3ei32_v_f16mf4x3(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf4x3(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16mf2x3(float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf2x3(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, 
vs3, vl); } -void test_vsoxseg3ei32_v_f16m1x3(float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m1x3(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16m2x3(float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m2x3(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32mf2x3(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32mf2x3(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32m1x3(float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m1x3(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32m2x3(float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m2x3(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m1x3(float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m1x3(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m2x3(float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m2x3(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf8x3(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf4x3(int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf2x3(int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m1x3(int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t 
v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m2x3(int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf4x3(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf2x3(int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m1x3(int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m2x3(int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32mf2x3(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m1x3(int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m2x3(int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m1x3(int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m2x3(int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf4x3(uint8_t 
*rs1, vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m1x3(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m2x3(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m1x3(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m2x3(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32m1x3(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32m2x3(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u64m1x3(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, 
vs3, vl); } -void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u64m2x3(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return 
__riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return 
__riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return 
__riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg3ei64.c b/auto-generated/gnu-overloaded-tests/vsoxseg3ei64.c index 424452b1c..2d1f08e6c 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg3ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg3ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg3ei64_v_f16mf4x3(float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - 
return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16mf4x3(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16mf2x3(float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16mf2x3(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16m1x3(float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16m1x3(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16m2x3(float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16m2x3(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32mf2x3(float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32mf2x3(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32m1x3(float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32m1x3(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32m2x3(float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32m2x3(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m1x3(float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m1x3(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m2x3(float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m2x3(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf8x3(int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf4x3(int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void 
test_vsoxseg3ei64_v_i8mf2x3(int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8m1x3(int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf4x3(int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf2x3(int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m1x3(int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m2x3(int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32mf2x3(int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m1x3(int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m2x3(int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m1x3(int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m2x3(int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, 
vl); } -void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8m1x3(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m1x3(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m2x3(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32m1x3(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32m2x3(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u64m1x3(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t 
v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u64m2x3(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, 
int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, 
vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t 
*base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg3ei8.c b/auto-generated/gnu-overloaded-tests/vsoxseg3ei8.c index 69aac0c81..71eeb83e5 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg3ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg3ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg3ei8_v_f16mf4x3(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16mf4x3(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f16mf2x3(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16mf2x3(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f16m1x3(float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16m1x3(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, 
vl); } -void test_vsoxseg3ei8_v_f16m2x3(float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16m2x3(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32mf2x3(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32mf2x3(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32m1x3(float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32m1x3(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32m2x3(float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32m2x3(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f64m1x3(float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f64m1x3(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f64m2x3(float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f64m2x3(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf8x3(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf4x3(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf2x3(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8m1x3(int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8m2x3(int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, 
v_tuple, vl); +void test_vsoxseg3ei8_v_i16mf4x3(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i16mf2x3(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i16m1x3(int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i16m2x3(int16_t *rs1, vuint8m1_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i32mf2x3(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i32m1x3(int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i32m2x3(int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i64m1x3(int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i64m2x3(int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void 
test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8m1x3(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8m2x3(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u16m1x3(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u16m2x3(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u32m1x3(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u32m2x3(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u64m1x3(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u64m2x3(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, 
size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, 
v_tuple, vl); +void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void 
test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg3ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg4ei16.c b/auto-generated/gnu-overloaded-tests/vsoxseg4ei16.c index 55588989e..0b8d17986 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg4ei16.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg4ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg4ei16_v_f16mf4x4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16mf4x4(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f16mf2x4(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16mf2x4(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f16m1x4(float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, 
size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16m1x4(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f16m2x4(float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16m2x4(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f32mf2x4(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f32mf2x4(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f32m1x4(float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f32m1x4(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f32m2x4(float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f32m2x4(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f64m1x4(float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f64m1x4(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f64m2x4(float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f64m2x4(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8mf8x4(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8mf4x4(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8mf2x4(int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8m1x4(int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void 
test_vsoxseg4ei16_v_i8m2x4(int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16mf4x4(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16mf2x4(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16m1x4(int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16m2x4(int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i32mf2x4(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i32m1x4(int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i32m2x4(int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i64m1x4(int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i64m2x4(int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return 
__riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8m1x4(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8m2x4(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u16m1x4(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u16m2x4(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u32m1x4(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u32m2x4(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u64m1x4(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, 
vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u64m2x4(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); } 
-void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei16\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg4ei32.c b/auto-generated/gnu-overloaded-tests/vsoxseg4ei32.c
index 01dd925c4..2dcec0c3a 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg4ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg4ei32.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg4ei32_v_f16mf4x4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(base,
bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16mf4x4(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f16mf2x4(float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16mf2x4(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f16m1x4(float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16m1x4(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f16m2x4(float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16m2x4(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f32mf2x4(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f32mf2x4(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f32m1x4(float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f32m1x4(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f32m2x4(float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f32m2x4(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f64m1x4(float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f64m1x4(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f64m2x4(float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f64m2x4(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8mf8x4(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8mf4x4(int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8mf2x4(int8_t 
*rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8m1x4(int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8m2x4(int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i16mf4x4(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i16mf2x4(int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i16m1x4(int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i16m2x4(int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i32mf2x4(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i32m1x4(int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i32m2x4(int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i64m1x4(int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i64m2x4(int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void 
test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8m1x4(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8m2x4(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u16m1x4(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u16m2x4(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u32m1x4(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { 
- return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u32m2x4(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u64m1x4(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u64m2x4(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, 
bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, 
v_tuple, vl); +void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, 
v_tuple, vl);
+void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei32\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg4ei64.c b/auto-generated/gnu-overloaded-tests/vsoxseg4ei64.c
index 044533de4..317db4355 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg4ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg4ei64.c
@@ -6,284 +6,284 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg4ei64_v_f16mf4x4(float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f16mf4x4(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f16mf2x4(float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f16mf2x4(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f16m1x4(float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f16m1x4(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f16m2x4(float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f16m2x4(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f32mf2x4(float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f32mf2x4(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f32m1x4(float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f32m1x4(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f32m2x4(float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f32m2x4(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f64m1x4(float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f64m1x4(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f64m2x4(float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f64m2x4(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
- return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
+void
test_vsoxseg4ei64_v_i8mf8x4(int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf4x4(int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8mf2x4(int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i8m1x4(int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16mf4x4(int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16mf2x4(int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16m1x4(int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i16m2x4(int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32mf2x4(int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32m1x4(int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i32m2x4(int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i64m1x4(int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } 
-void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_i64m2x4(int64_t *rs1, vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u8m1x4(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16m1x4(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u16m2x4(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32m1x4(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) 
{ - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u32m2x4(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u64m1x4(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_u64m2x4(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, 
bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei64\.[ivxfswum.]+\s+} 70 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg4ei8.c b/auto-generated/gnu-overloaded-tests/vsoxseg4ei8.c
index 2882657b8..4b887794d 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg4ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg4ei8.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg4ei8_v_f16mf4x4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16mf4x4(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f16mf2x4(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16mf2x4(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f16m1x4(float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16m1x4(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f16m2x4(float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16m2x4(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f32mf2x4(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f32mf2x4(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f32m1x4(float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f32m1x4(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f32m2x4(float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f32m2x4(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f64m1x4(float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f64m1x4(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f64m2x4(float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f64m2x4(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8mf8x4(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8mf4x4(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8mf2x4(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8m1x4(int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8m2x4(int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16mf4x4(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16mf2x4(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16m1x4(int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16m2x4(int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i32mf2x4(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i32m1x4(int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i32m2x4(int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i64m1x4(int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i64m2x4(int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8m1x4(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8m2x4(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16m1x4(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16m2x4(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u32m1x4(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u32m2x4(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u64m1x4(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u64m2x4(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg4ei8\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg5ei16.c b/auto-generated/gnu-overloaded-tests/vsoxseg5ei16.c
index a9661a548..65a4084b5 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg5ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg5ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg5ei16_v_f16mf4x5(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f16mf4x5(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f16mf2x5(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f16mf2x5(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f16m1x5(float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f16m1x5(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f32mf2x5(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f32mf2x5(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f32m1x5(float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f32m1x5(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f64m1x5(float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f64m1x5(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8mf8x5(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8mf4x5(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8mf2x5(int8_t *rs1, vuint16m1_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8m1x5(int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i16mf4x5(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i16mf2x5(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i16m1x5(int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i32mf2x5(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i32m1x5(int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i64m1x5(int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8m1x5(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u16m1x5(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u32m1x5(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u64m1x5(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg5ei32.c b/auto-generated/gnu-overloaded-tests/vsoxseg5ei32.c
index d2ea46a6a..fbf4ebb86 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg5ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg5ei32.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsoxseg5ei32_v_f16mf4x5(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f16mf4x5(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f16mf2x5(float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f16mf2x5(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f16m1x5(float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f16m1x5(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f32mf2x5(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f32mf2x5(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f32m1x5(float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f32m1x5(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f64m1x5(float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f64m1x5(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8mf8x5(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8mf4x5(int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8mf2x5(int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8m1x5(int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i16mf4x5(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i16mf2x5(int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i16m1x5(int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i32mf2x5(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i32m1x5(int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i64m1x5(int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8m1x5(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u16m1x5(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u32m1x5(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u64m1x5(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg5ei64.c b/auto-generated/gnu-overloaded-tests/vsoxseg5ei64.c index 70cebd57b..49255ed32 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg5ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg5ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg5ei64_v_f16mf4x5(float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf4x5(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16mf2x5(float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf2x5(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16m1x5(float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16m1x5(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32mf2x5(float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32mf2x5(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32m1x5(float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32m1x5(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f64m1x5(float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f64m1x5(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf8x5(int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf4x5(int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf2x5(int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8m1x5(int8_t *rs1, vuint64m8_t vs2, 
vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf4x5(int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf2x5(int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16m1x5(int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32mf2x5(int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32m1x5(int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i64m1x5(int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8m1x5(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void 
test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16m1x5(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32m1x5(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u64m1x5(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, 
size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg5ei8.c b/auto-generated/gnu-overloaded-tests/vsoxseg5ei8.c index 653b1247a..047cb7983 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg5ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg5ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg5ei8_v_f16mf4x5(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16mf4x5(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f16mf2x5(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16mf2x5(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f16m1x5(float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16m1x5(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f32mf2x5(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f32mf2x5(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f32m1x5(float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f32m1x5(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f64m1x5(float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f64m1x5(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8mf8x5(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8mf4x5(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8mf2x5(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return 
__riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8m1x5(int8_t *rs1, vuint8m1_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i16mf4x5(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i16mf2x5(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i16m1x5(int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i32mf2x5(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i32m1x5(int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i64m1x5(int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8m1x5(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u16m1x5(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u32m1x5(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u64m1x5(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, 
vl); } -void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg5ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg6ei16.c b/auto-generated/gnu-overloaded-tests/vsoxseg6ei16.c index 81b917563..c3569ac01 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg6ei16.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg6ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg6ei16_v_f16mf4x6(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f16mf4x6(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f16mf2x6(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f16mf2x6(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f16m1x6(float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f16m1x6(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f32mf2x6(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f32mf2x6(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f32m1x6(float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f32m1x6(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f64m1x6(float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f64m1x6(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8mf8x6(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8mf4x6(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8mf2x6(int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8m1x6(int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i16mf4x6(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i16mf2x6(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i16m1x6(int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i32mf2x6(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i32m1x6(int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i64m1x6(int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8m1x6(uint8_t *rs1, vuint16m2_t vs2, 
vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u16m1x6(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u32m1x6(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u64m1x6(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, 
vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, 
vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void 
test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg6ei32.c b/auto-generated/gnu-overloaded-tests/vsoxseg6ei32.c index 0076f808f..e5fd0519b 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg6ei32.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg6ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg6ei32_v_f16mf4x6(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f16mf4x6(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f16mf2x6(float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f16mf2x6(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f16m1x6(float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f16m1x6(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f32mf2x6(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f32mf2x6(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f32m1x6(float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f32m1x6(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f64m1x6(float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f64m1x6(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8mf8x6(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t 
v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8mf4x6(int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8mf2x6(int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8m1x6(int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i16mf4x6(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i16mf2x6(int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i16m1x6(int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i32mf2x6(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i32m1x6(int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i64m1x6(int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *rs1, 
vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8m1x6(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u16m1x6(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u32m1x6(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u64m1x6(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) 
{ - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, 
vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg6ei64.c b/auto-generated/gnu-overloaded-tests/vsoxseg6ei64.c index 384d07e58..6983fe605 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg6ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg6ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg6ei64_v_f16mf4x6(float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16mf4x6(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16mf2x6(float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16mf2x6(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16m1x6(float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16m1x6(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f32mf2x6(float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f32mf2x6(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f32m1x6(float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f32m1x6(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f64m1x6(float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f64m1x6(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf8x6(int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf4x6(int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf2x6(int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8m1x6(int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16mf4x6(int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16mf2x6(int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16m1x6(int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i32mf2x6(int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i32m1x6(int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i64m1x6(int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t 
vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8m1x6(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16m1x6(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32m1x6(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u64m1x6(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t vm, 
float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void 
test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, 
vl); +void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg6ei8.c b/auto-generated/gnu-overloaded-tests/vsoxseg6ei8.c index 222bb0a01..1916596e0 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg6ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg6ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg6ei8_v_f16mf4x6(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf4x6(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16mf2x6(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf2x6(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16m1x6(float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16m1x6(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32mf2x6(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32mf2x6(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32m1x6(float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32m1x6(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f64m1x6(float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void 
test_vsoxseg6ei8_v_f64m1x6(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf8x6(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf4x6(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf2x6(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8m1x6(int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16mf4x6(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16mf2x6(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16m1x6(int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32mf2x6(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32m1x6(int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i64m1x6(int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf4x6(uint8_t 
*base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8m1x6(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16m1x6(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u32m1x6(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u64m1x6(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint8mf2_t 
bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t 
v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) {
+  return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg6ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg7ei16.c b/auto-generated/gnu-overloaded-tests/vsoxseg7ei16.c
index b23f2ef1f..23daf4683 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg7ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg7ei16.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxseg7ei16_v_f16mf4x7(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f16mf4x7(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f16mf2x7(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f16mf2x7(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f16m1x7(float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f16m1x7(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f32mf2x7(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f32mf2x7(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f32m1x7(float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f32m1x7(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f64m1x7(float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f64m1x7(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8mf8x7(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8mf4x7(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8mf2x7(int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8m1x7(int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i16mf4x7(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i16mf2x7(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i16m1x7(int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i32mf2x7(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i32m1x7(int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i64m1x7(int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8m1x7(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u16m1x7(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u32m1x7(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u64m1x7(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg7ei32.c b/auto-generated/gnu-overloaded-tests/vsoxseg7ei32.c
index ab7afa15a..d0b59862b 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg7ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg7ei32.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxseg7ei32_v_f16mf4x7(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16mf4x7(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f16mf2x7(float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16mf2x7(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f16m1x7(float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16m1x7(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f32mf2x7(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f32mf2x7(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f32m1x7(float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f32m1x7(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f64m1x7(float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f64m1x7(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf8x7(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf4x7(int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf2x7(int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8m1x7(int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16mf4x7(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16mf2x7(int16_t *rs1, vuint32m1_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16m1x7(int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i32mf2x7(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i32m1x7(int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i64m1x7(int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8m1x7(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16m1x7(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u32m1x7(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u64m1x7(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg7ei64.c b/auto-generated/gnu-overloaded-tests/vsoxseg7ei64.c
index 37bfd9284..6fcde61ce 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg7ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg7ei64.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxseg7ei64_v_f16mf4x7(float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf4x7(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f16mf2x7(float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf2x7(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f16m1x7(float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16m1x7(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f32mf2x7(float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32mf2x7(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f32m1x7(float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32m1x7(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f64m1x7(float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f64m1x7(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf8x7(int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf4x7(int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf2x7(int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8m1x7(int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf4x7(int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf2x7(int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16m1x7(int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32mf2x7(int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32m1x7(int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i64m1x7(int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8m1x7(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16m1x7(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32m1x7(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u64m1x7(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg7ei8.c b/auto-generated/gnu-overloaded-tests/vsoxseg7ei8.c
index b0e676516..34fbb9250 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg7ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg7ei8.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxseg7ei8_v_f16mf4x7(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf4x7(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f16mf2x7(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf2x7(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f16m1x7(float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16m1x7(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f32mf2x7(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32mf2x7(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f32m1x7(float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32m1x7(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f64m1x7(float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f64m1x7(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf8x7(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf4x7(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf2x7(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8m1x7(int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf4x7(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf2x7(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16m1x7(int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32mf2x7(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32m1x7(int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i64m1x7(int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8m1x7(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16m1x7(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32m1x7(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u64m1x7(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg7ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg8ei16.c b/auto-generated/gnu-overloaded-tests/vsoxseg8ei16.c
index 0ae7b36a9..ac933cc2a 100644
--- a/auto-generated/gnu-overloaded-tests/vsoxseg8ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsoxseg8ei16.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsoxseg8ei16_v_f16mf4x8(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16mf4x8(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg8ei16_v_f16mf2x8(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16mf2x8(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsoxseg8ei16_v_f16m1x8(float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsoxseg8ei16_v_f16m1x8(float16_t *rs1, vuint16m1_t
vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f32mf2x8(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f32mf2x8(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f32m1x8(float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f32m1x8(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f64m1x8(float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f64m1x8(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf8x8(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf4x8(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf2x8(int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8m1x8(int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16mf4x8(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16mf2x8(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16m1x8(int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i32mf2x8(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void 
test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i32m1x8(int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i64m1x8(int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8m1x8(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16m1x8(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u32m1x8(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, 
size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u64m1x8(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t 
bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, 
vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg8ei32.c b/auto-generated/gnu-overloaded-tests/vsoxseg8ei32.c index 36e23910f..256f8dcfb 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg8ei32.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg8ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg8ei32_v_f16mf4x8(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf4x8(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16mf2x8(float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf2x8(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t 
vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16m1x8(float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16m1x8(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32mf2x8(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32mf2x8(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32m1x8(float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32m1x8(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f64m1x8(float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f64m1x8(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf8x8(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf4x8(int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf2x8(int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8m1x8(int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf4x8(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf2x8(int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16m1x8(int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void 
test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32mf2x8(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32m1x8(int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i64m1x8(int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8m1x8(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16m1x8(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) 
{ - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32m1x8(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u64m1x8(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return 
__riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - 
return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg8ei64.c b/auto-generated/gnu-overloaded-tests/vsoxseg8ei64.c index 15df5772b..49250ed1e 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg8ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg8ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg8ei64_v_f16mf4x8(float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf4x8(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return 
__riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16mf2x8(float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf2x8(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16m1x8(float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16m1x8(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32mf2x8(float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32mf2x8(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32m1x8(float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32m1x8(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f64m1x8(float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f64m1x8(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf8x8(int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf4x8(int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf2x8(int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8m1x8(int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf4x8(int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf2x8(int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, 
vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16m1x8(int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32mf2x8(int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32m1x8(int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i64m1x8(int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8m1x8(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16m1x8(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); 
+void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u32m1x8(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u64m1x8(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t vm, 
int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t vm, uint8_t 
*rs1, vuint64m2_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsoxseg8ei8.c b/auto-generated/gnu-overloaded-tests/vsoxseg8ei8.c index 39f717449..c977e9d02 100644 --- a/auto-generated/gnu-overloaded-tests/vsoxseg8ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsoxseg8ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsoxseg8ei8_v_f16mf4x8(float16_t *base, 
vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf4x8(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16mf2x8(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf2x8(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16m1x8(float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16m1x8(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32mf2x8(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32mf2x8(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32m1x8(float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32m1x8(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f64m1x8(float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f64m1x8(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf8x8(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf4x8(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf2x8(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8m1x8(int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16mf4x8(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void 
test_vsoxseg8ei8_v_i16mf2x8(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16m1x8(int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32mf2x8(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32m1x8(int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i64m1x8(int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8m1x8(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16m1x8(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void 
test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32m1x8(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u64m1x8(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) 
{ - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, 
v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsoxseg8ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsra.c b/auto-generated/gnu-overloaded-tests/vsra.c index c924a322b..5b42f73b3 100644 --- a/auto-generated/gnu-overloaded-tests/vsra.c +++ b/auto-generated/gnu-overloaded-tests/vsra.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, 
vuint8mf8_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vsra(vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t 
shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra(vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t 
vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16mf4_t 
test_vsra_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); 
+vint32m1_t test_vsra_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m4_t 
test_vsra_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra(vm, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra(mask, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsra\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsrl.c b/auto-generated/gnu-overloaded-tests/vsrl.c index 776f830ac..afc71b481 100644 --- a/auto-generated/gnu-overloaded-tests/vsrl.c +++ b/auto-generated/gnu-overloaded-tests/vsrl.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl(op1, 
shift, vl); +vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return 
__riscv_vsrl(vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl(vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - 
return __riscv_vsrl(mask, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, 
vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } 
-vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl(vm, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl(mask, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsrl\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsse16.c b/auto-generated/gnu-overloaded-tests/vsse16.c index 0e8754e27..4e0f25cf2 100644 --- a/auto-generated/gnu-overloaded-tests/vsse16.c +++ b/auto-generated/gnu-overloaded-tests/vsse16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsse16_v_f16mf4(float16_t *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_f16mf4(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16mf2(float16_t *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_f16mf2(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m1(float16_t *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_f16m1(float16_t *rs1, ptrdiff_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } 
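The vsse16 hunks rename base/bstride/value to rs1/rs2/vs3, matching the vsse16.v operands (rs1 = base address, rs2 = stride, vs3 = store data). A minimal caller sketch, assuming the hypothetical helper name store_every_other_i16 — note the stride operand is expressed in bytes:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: write one vector of int16_t elements to every second
   slot of dst. The stride (rs2) is in bytes, so skipping one element per
   store means a stride of 2 * sizeof(int16_t). */
void store_every_other_i16(int16_t *dst, vint16m1_t vs3, size_t vl) {
  __riscv_vsse16(dst, (ptrdiff_t)(2 * sizeof(int16_t)), vs3, vl);
}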
-void test_vsse16_v_f16m2(float16_t *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_f16m2(float16_t *rs1, ptrdiff_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m4(float16_t *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_f16m4(float16_t *rs1, ptrdiff_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m8(float16_t *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_f16m8(float16_t *rs1, ptrdiff_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_i16mf4(int16_t *rs1, ptrdiff_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_i16mf2(int16_t *rs1, ptrdiff_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_i16m1(int16_t *rs1, ptrdiff_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_i16m2(int16_t *rs1, ptrdiff_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_i16m4(int16_t *rs1, ptrdiff_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_i16m8(int16_t *rs1, ptrdiff_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_u16mf4(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_u16mf2(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_u16m1(uint16_t *rs1, ptrdiff_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_u16m2(uint16_t *rs1, ptrdiff_t rs2, vuint16m2_t vs3, size_t vl) { + return 
__riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_u16m4(uint16_t *rs1, ptrdiff_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { - return __riscv_vsse16(base, bstride, value, vl); +void test_vsse16_v_u16m8(uint16_t *rs1, ptrdiff_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); } -void test_vsse16_v_f16mf4_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_f16mf2_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m1_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_f16m1_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m2_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_f16m2_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m4_m(vbool4_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_f16m4_m(vbool4_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_f16m8_m(vbool2_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_f16m8_m(vbool2_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_i16m1_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, 
ptrdiff_t bstride, vint16m2_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_i16m2_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_i16m4_m(vbool4_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_i16m8_m(vbool2_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } -void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { - return __riscv_vsse16(mask, base, bstride, value, vl); +void test_vsse16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsse32.c b/auto-generated/gnu-overloaded-tests/vsse32.c index 7a6573509..b9a6d5456 100644 --- a/auto-generated/gnu-overloaded-tests/vsse32.c +++ b/auto-generated/gnu-overloaded-tests/vsse32.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsse32_v_f32mf2(float32_t *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_f32mf2(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2_t vs3, size_t vl) { + return 
__riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m1(float32_t *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_f32m1(float32_t *rs1, ptrdiff_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m2(float32_t *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_f32m2(float32_t *rs1, ptrdiff_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m4(float32_t *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_f32m4(float32_t *rs1, ptrdiff_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m8(float32_t *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_f32m8(float32_t *rs1, ptrdiff_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_i32mf2(int32_t *rs1, ptrdiff_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_i32m1(int32_t *rs1, ptrdiff_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_i32m2(int32_t *rs1, ptrdiff_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_i32m4(int32_t *rs1, ptrdiff_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_i32m8(int32_t *rs1, ptrdiff_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_u32mf2(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_u32m1(uint32_t *rs1, ptrdiff_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_u32m2(uint32_t *rs1, ptrdiff_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_u32m4(uint32_t *rs1, ptrdiff_t rs2, 
vuint32m4_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { - return __riscv_vsse32(base, bstride, value, vl); +void test_vsse32_v_u32m8(uint32_t *rs1, ptrdiff_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsse32(rs1, rs2, vs3, vl); } -void test_vsse32_v_f32mf2_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m1_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_f32m1_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m2_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_f32m2_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m4_m(vbool8_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_f32m4_m(vbool8_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_f32m8_m(vbool4_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_f32m8_m(vbool4_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_i32mf2_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_i32m1_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_i32m2_m(vbool16_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_i32m4_m(vbool8_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_i32m8_m(vbool4_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } 
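The masked variants prepend the mask as vm; for stores, lanes whose mask bit is clear are simply not written to memory. A sketch of the masked overloaded form, with the hypothetical name store_f32_masked (the parameter names follow the renamed tests):

#include <riscv_vector.h>
#include <stddef.h>

/* Illustrative only: masked strided store. Lanes whose bit in vm is clear
   are not written, so the corresponding memory is left untouched. */
void store_f32_masked(vbool32_t vm, float *rs1, ptrdiff_t rs2,
                      vfloat32m1_t vs3, size_t vl) {
  __riscv_vsse32(vm, rs1, rs2, vs3, vl);
}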
-void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } -void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { - return __riscv_vsse32(mask, base, bstride, value, vl); +void test_vsse32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsse32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse32\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsse64.c b/auto-generated/gnu-overloaded-tests/vsse64.c index f72d9d05a..5d97cf4ab 100644 --- a/auto-generated/gnu-overloaded-tests/vsse64.c +++ b/auto-generated/gnu-overloaded-tests/vsse64.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsse64_v_f64m1(float64_t *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_f64m1(float64_t *rs1, ptrdiff_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_f64m2(float64_t *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_f64m2(float64_t *rs1, ptrdiff_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_f64m4(float64_t *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_f64m4(float64_t *rs1, ptrdiff_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_f64m8(float64_t *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_f64m8(float64_t *rs1, ptrdiff_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_i64m1(int64_t *rs1, ptrdiff_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t 
value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_i64m2(int64_t *rs1, ptrdiff_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_i64m4(int64_t *rs1, ptrdiff_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_i64m8(int64_t *rs1, ptrdiff_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_u64m1(uint64_t *rs1, ptrdiff_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_u64m2(uint64_t *rs1, ptrdiff_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_u64m4(uint64_t *rs1, ptrdiff_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { - return __riscv_vsse64(base, bstride, value, vl); +void test_vsse64_v_u64m8(uint64_t *rs1, ptrdiff_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsse64(rs1, rs2, vs3, vl); } -void test_vsse64_v_f64m1_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_f64m1_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_f64m2_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_f64m2_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_f64m4_m(vbool16_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_f64m4_m(vbool16_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_f64m8_m(vbool8_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_f64m8_m(vbool8_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_i64m1_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, 
size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_i64m2_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_i64m4_m(vbool16_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_i64m8_m(vbool8_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } -void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { - return __riscv_vsse64(mask, base, bstride, value, vl); +void test_vsse64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsse64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse64\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsse8.c b/auto-generated/gnu-overloaded-tests/vsse8.c index d0bba4565..655ba886d 100644 --- a/auto-generated/gnu-overloaded-tests/vsse8.c +++ b/auto-generated/gnu-overloaded-tests/vsse8.c @@ -6,116 +6,116 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_i8mf8(int8_t *rs1, ptrdiff_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_i8mf4(int8_t *rs1, ptrdiff_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_i8mf2(int8_t *rs1, ptrdiff_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); 
+void test_vsse8_v_i8m1(int8_t *rs1, ptrdiff_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_i8m2(int8_t *rs1, ptrdiff_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_i8m4(int8_t *rs1, ptrdiff_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_i8m8(int8_t *rs1, ptrdiff_t rs2, vint8m8_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_u8mf8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_u8mf4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_u8mf2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_u8m1(uint8_t *rs1, ptrdiff_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_u8m2(uint8_t *rs1, ptrdiff_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_u8m4(uint8_t *rs1, ptrdiff_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { - return __riscv_vsse8(base, bstride, value, vl); +void test_vsse8_v_u8m8(uint8_t *rs1, ptrdiff_t rs2, vuint8m8_t vs3, size_t vl) { + return __riscv_vsse8(rs1, rs2, vs3, vl); } -void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { - return __riscv_vsse8(mask, base, 
bstride, value, vl); +void test_vsse8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_i8m1_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_i8m2_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_i8m4_m(vbool2_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_i8m8_m(vbool1_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m8_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, vl); } -void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { - return __riscv_vsse8(mask, base, bstride, value, vl); +void test_vsse8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m8_t vs3, size_t vl) { + return __riscv_vsse8(vm, rs1, rs2, vs3, 
vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsse8\.[ivxfswum.]+\s+} 28 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg2e16.c b/auto-generated/gnu-overloaded-tests/vsseg2e16.c index 6aa53354a..b0cfd3a4d 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg2e16.c +++ b/auto-generated/gnu-overloaded-tests/vsseg2e16.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg2e16_v_f16mf4x2(float16_t *base, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_f16mf4x2(float16_t *rs1, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_f16mf2x2(float16_t *base, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_f16mf2x2(float16_t *rs1, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_f16m1x2(float16_t *base, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_f16m1x2(float16_t *rs1, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_f16m2x2(float16_t *base, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_f16m2x2(float16_t *rs1, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_f16m4x2(float16_t *base, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_f16m4x2(float16_t *rs1, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_i16mf4x2(int16_t *base, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_i16mf4x2(int16_t *rs1, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_i16mf2x2(int16_t *base, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_i16mf2x2(int16_t *rs1, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_i16m1x2(int16_t *base, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_i16m1x2(int16_t *rs1, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_i16m2x2(int16_t *base, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_i16m2x2(int16_t *rs1, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_i16m4x2(int16_t *base, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_i16m4x2(int16_t *rs1, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_u16mf4x2(uint16_t *base, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_u16mf4x2(uint16_t *rs1, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_u16mf2x2(uint16_t *base, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_u16mf2x2(uint16_t *rs1, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, 
vl); } -void test_vsseg2e16_v_u16m1x2(uint16_t *base, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_u16m1x2(uint16_t *rs1, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_u16m2x2(uint16_t *base, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_u16m2x2(uint16_t *rs1, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_u16m4x2(uint16_t *base, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(base, v_tuple, vl); +void test_vsseg2e16_v_u16m4x2(uint16_t *rs1, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); } -void test_vsseg2e16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_f16m1x2_m(vbool16_t mask, float16_t *base, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_f16m2x2_m(vbool8_t mask, float16_t *base, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_f16m4x2_m(vbool4_t mask, float16_t *base, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vint16m4x2_t v_tuple, size_t vl) { - return 
__riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } -void test_vsseg2e16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e16(mask, base, v_tuple, vl); +void test_vsseg2e16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e16\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg2e32.c b/auto-generated/gnu-overloaded-tests/vsseg2e32.c index 345f5ec7e..e8bde9bc9 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg2e32.c +++ b/auto-generated/gnu-overloaded-tests/vsseg2e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg2e32_v_f32mf2x2(float32_t *base, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_f32mf2x2(float32_t *rs1, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_f32m1x2(float32_t *base, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_f32m1x2(float32_t *rs1, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_f32m2x2(float32_t *base, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_f32m2x2(float32_t *rs1, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_f32m4x2(float32_t *base, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_f32m4x2(float32_t *rs1, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_i32mf2x2(int32_t *base, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_i32mf2x2(int32_t *rs1, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_i32m1x2(int32_t *base, vint32m1x2_t v_tuple, size_t vl) { - return 
__riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_i32m1x2(int32_t *rs1, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_i32m2x2(int32_t *base, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_i32m2x2(int32_t *rs1, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_i32m4x2(int32_t *base, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_i32m4x2(int32_t *rs1, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_u32mf2x2(uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_u32mf2x2(uint32_t *rs1, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_u32m1x2(uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_u32m1x2(uint32_t *rs1, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_u32m2x2(uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_u32m2x2(uint32_t *rs1, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_u32m4x2(uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(base, v_tuple, vl); +void test_vsseg2e32_v_u32m4x2(uint32_t *rs1, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(rs1, vs3, vl); } -void test_vsseg2e32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_f32m1x2_m(vbool32_t mask, float32_t *base, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_f32m2x2_m(vbool16_t mask, float32_t *base, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_f32m4x2_m(vbool8_t mask, float32_t *base, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vint32m2x2_t v_tuple, size_t vl) { 
- return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } -void test_vsseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e32(mask, base, v_tuple, vl); +void test_vsseg2e32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg2e64.c b/auto-generated/gnu-overloaded-tests/vsseg2e64.c index e87ba7877..a54040742 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg2e64.c +++ b/auto-generated/gnu-overloaded-tests/vsseg2e64.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg2e64_v_f64m1x2(float64_t *base, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_f64m1x2(float64_t *rs1, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_f64m2x2(float64_t *base, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_f64m2x2(float64_t *rs1, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_f64m4x2(float64_t *base, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_f64m4x2(float64_t *rs1, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_i64m1x2(int64_t *base, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_i64m1x2(int64_t *rs1, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_i64m2x2(int64_t *base, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_i64m2x2(int64_t *rs1, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_i64m4x2(int64_t *base, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, 
v_tuple, vl); +void test_vsseg2e64_v_i64m4x2(int64_t *rs1, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_u64m1x2(uint64_t *base, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_u64m1x2(uint64_t *rs1, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_u64m2x2(uint64_t *base, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_u64m2x2(uint64_t *rs1, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_u64m4x2(uint64_t *base, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(base, v_tuple, vl); +void test_vsseg2e64_v_u64m4x2(uint64_t *rs1, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(rs1, vs3, vl); } -void test_vsseg2e64_v_f64m1x2_m(vbool64_t mask, float64_t *base, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_f64m2x2_m(vbool32_t mask, float64_t *base, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_f64m4x2_m(vbool16_t mask, float64_t *base, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } -void test_vsseg2e64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e64(mask, base, v_tuple, vl); +void test_vsseg2e64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e64(vm, rs1, vs3, vl); } 
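The vsseg2e64 tests store a two-field tuple (vs3) whose fields are interleaved element-by-element into memory. A sketch pairing the store with the matching segment load — the non-overloaded load spelling __riscv_vlseg2e64_v_u64m1x2 is assumed from the companion vlseg tests and is not part of this file:

#include <riscv_vector.h>
#include <stdint.h>

/* Illustrative only: copy vl interleaved {a, b} pairs of uint64_t. The
   segment load splits src into the tuple's two fields; the segment store
   re-interleaves them into dst. */
void copy_u64_pairs(uint64_t *dst, const uint64_t *src, size_t vl) {
  vuint64m1x2_t vs3 = __riscv_vlseg2e64_v_u64m1x2(src, vl);
  __riscv_vsseg2e64(dst, vs3, vl);
}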
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e64\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg2e8.c b/auto-generated/gnu-overloaded-tests/vsseg2e8.c index b4df9f423..34f32094b 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg2e8.c +++ b/auto-generated/gnu-overloaded-tests/vsseg2e8.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg2e8_v_i8mf8x2(int8_t *base, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_i8mf8x2(int8_t *rs1, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_i8mf4x2(int8_t *base, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_i8mf4x2(int8_t *rs1, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_i8mf2x2(int8_t *base, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_i8mf2x2(int8_t *rs1, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_i8m1x2(int8_t *base, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_i8m1x2(int8_t *rs1, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_i8m2x2(int8_t *base, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_i8m2x2(int8_t *rs1, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_i8m4x2(int8_t *base, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_i8m4x2(int8_t *rs1, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_u8mf8x2(uint8_t *base, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_u8mf8x2(uint8_t *rs1, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_u8mf4x2(uint8_t *base, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_u8mf4x2(uint8_t *rs1, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_u8mf2x2(uint8_t *base, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_u8mf2x2(uint8_t *rs1, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_u8m1x2(uint8_t *base, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_u8m1x2(uint8_t *rs1, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_u8m2x2(uint8_t *base, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_u8m2x2(uint8_t *rs1, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_u8m4x2(uint8_t *base, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(base, v_tuple, vl); +void test_vsseg2e8_v_u8m4x2(uint8_t *rs1, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(rs1, vs3, vl); } -void test_vsseg2e8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, 
vl); +void test_vsseg2e8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } -void test_vsseg2e8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsseg2e8(mask, base, v_tuple, vl); +void test_vsseg2e8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e8(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg2e8\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg3e16.c b/auto-generated/gnu-overloaded-tests/vsseg3e16.c index 
d0b03557b..6e6e7556a 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg3e16.c +++ b/auto-generated/gnu-overloaded-tests/vsseg3e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg3e16_v_f16mf4x3(float16_t *base, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_f16mf4x3(float16_t *rs1, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_f16mf2x3(float16_t *base, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_f16mf2x3(float16_t *rs1, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_f16m1x3(float16_t *base, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_f16m1x3(float16_t *rs1, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_f16m2x3(float16_t *base, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_f16m2x3(float16_t *rs1, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_i16mf4x3(int16_t *base, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_i16mf4x3(int16_t *rs1, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_i16mf2x3(int16_t *base, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_i16mf2x3(int16_t *rs1, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_i16m1x3(int16_t *base, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_i16m1x3(int16_t *rs1, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_i16m2x3(int16_t *base, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_i16m2x3(int16_t *rs1, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_u16mf4x3(uint16_t *base, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_u16mf4x3(uint16_t *rs1, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_u16mf2x3(uint16_t *base, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_u16mf2x3(uint16_t *rs1, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_u16m1x3(uint16_t *base, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_u16m1x3(uint16_t *rs1, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_u16m2x3(uint16_t *base, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(base, v_tuple, vl); +void test_vsseg3e16_v_u16m2x3(uint16_t *rs1, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); } -void test_vsseg3e16_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x3_t vs3, size_t vl) { + return 
__riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_f16m1x3_m(vbool16_t mask, float16_t *base, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_f16m2x3_m(vbool8_t mask, float16_t *base, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } -void test_vsseg3e16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e16(mask, base, v_tuple, vl); +void test_vsseg3e16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg3e32.c 
b/auto-generated/gnu-overloaded-tests/vsseg3e32.c index d04451049..afce32527 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg3e32.c +++ b/auto-generated/gnu-overloaded-tests/vsseg3e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg3e32_v_f32mf2x3(float32_t *base, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_f32mf2x3(float32_t *rs1, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_f32m1x3(float32_t *base, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_f32m1x3(float32_t *rs1, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_f32m2x3(float32_t *base, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_f32m2x3(float32_t *rs1, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_i32mf2x3(int32_t *base, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_i32mf2x3(int32_t *rs1, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_i32m1x3(int32_t *base, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_i32m1x3(int32_t *rs1, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_i32m2x3(int32_t *base, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_i32m2x3(int32_t *rs1, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_u32mf2x3(uint32_t *base, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_u32mf2x3(uint32_t *rs1, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_u32m1x3(uint32_t *base, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_u32m1x3(uint32_t *rs1, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_u32m2x3(uint32_t *base, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(base, v_tuple, vl); +void test_vsseg3e32_v_u32m2x3(uint32_t *rs1, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(rs1, vs3, vl); } -void test_vsseg3e32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_f32m1x3_m(vbool32_t mask, float32_t *base, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_f32m2x3_m(vbool16_t mask, float32_t *base, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vint32mf2x3_t 
v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } -void test_vsseg3e32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e32(mask, base, v_tuple, vl); +void test_vsseg3e32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg3e64.c b/auto-generated/gnu-overloaded-tests/vsseg3e64.c index 4d130d52b..29ca57198 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg3e64.c +++ b/auto-generated/gnu-overloaded-tests/vsseg3e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg3e64_v_f64m1x3(float64_t *base, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(base, v_tuple, vl); +void test_vsseg3e64_v_f64m1x3(float64_t *rs1, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(rs1, vs3, vl); } -void test_vsseg3e64_v_f64m2x3(float64_t *base, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(base, v_tuple, vl); +void test_vsseg3e64_v_f64m2x3(float64_t *rs1, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(rs1, vs3, vl); } -void test_vsseg3e64_v_i64m1x3(int64_t *base, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(base, v_tuple, vl); +void test_vsseg3e64_v_i64m1x3(int64_t *rs1, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(rs1, vs3, vl); } -void test_vsseg3e64_v_i64m2x3(int64_t *base, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(base, v_tuple, vl); +void test_vsseg3e64_v_i64m2x3(int64_t *rs1, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(rs1, vs3, vl); } -void test_vsseg3e64_v_u64m1x3(uint64_t *base, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(base, v_tuple, vl); +void test_vsseg3e64_v_u64m1x3(uint64_t *rs1, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(rs1, vs3, vl); } -void test_vsseg3e64_v_u64m2x3(uint64_t *base, vuint64m2x3_t v_tuple, size_t vl) { - return 
__riscv_vsseg3e64(base, v_tuple, vl); +void test_vsseg3e64_v_u64m2x3(uint64_t *rs1, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(rs1, vs3, vl); } -void test_vsseg3e64_v_f64m1x3_m(vbool64_t mask, float64_t *base, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(mask, base, v_tuple, vl); +void test_vsseg3e64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_f64m2x3_m(vbool32_t mask, float64_t *base, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(mask, base, v_tuple, vl); +void test_vsseg3e64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(mask, base, v_tuple, vl); +void test_vsseg3e64_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(mask, base, v_tuple, vl); +void test_vsseg3e64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(mask, base, v_tuple, vl); +void test_vsseg3e64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(vm, rs1, vs3, vl); } -void test_vsseg3e64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e64(mask, base, v_tuple, vl); +void test_vsseg3e64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e64(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg3e8.c b/auto-generated/gnu-overloaded-tests/vsseg3e8.c index 9d0079531..cab99e3be 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg3e8.c +++ b/auto-generated/gnu-overloaded-tests/vsseg3e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg3e8_v_i8mf8x3(int8_t *base, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_i8mf8x3(int8_t *rs1, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf4x3(int8_t *base, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_i8mf4x3(int8_t *rs1, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf2x3(int8_t *base, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_i8mf2x3(int8_t *rs1, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_i8m1x3(int8_t *base, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_i8m1x3(int8_t *rs1, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_i8m2x3(int8_t *base, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void 
test_vsseg3e8_v_i8m2x3(int8_t *rs1, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf8x3(uint8_t *base, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_u8mf8x3(uint8_t *rs1, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf4x3(uint8_t *base, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_u8mf4x3(uint8_t *rs1, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf2x3(uint8_t *base, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_u8mf2x3(uint8_t *rs1, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_u8m1x3(uint8_t *base, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_u8m1x3(uint8_t *rs1, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_u8m2x3(uint8_t *base, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(base, v_tuple, vl); +void test_vsseg3e8_v_u8m2x3(uint8_t *rs1, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x3_t 
vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } -void test_vsseg3e8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsseg3e8(mask, base, v_tuple, vl); +void test_vsseg3e8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e8(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg3e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg4e16.c b/auto-generated/gnu-overloaded-tests/vsseg4e16.c index 01cddbf9d..19c7edb6a 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg4e16.c +++ b/auto-generated/gnu-overloaded-tests/vsseg4e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e16_v_f16mf4x4(float16_t *base, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_f16mf4x4(float16_t *rs1, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_f16mf2x4(float16_t *base, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_f16mf2x4(float16_t *rs1, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_f16m1x4(float16_t *base, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_f16m1x4(float16_t *rs1, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_f16m2x4(float16_t *base, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_f16m2x4(float16_t *rs1, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf4x4(int16_t *base, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_i16mf4x4(int16_t *rs1, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf2x4(int16_t *base, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_i16mf2x4(int16_t *rs1, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_i16m1x4(int16_t *base, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_i16m1x4(int16_t *rs1, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_i16m2x4(int16_t *base, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_i16m2x4(int16_t *rs1, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_u16mf4x4(uint16_t *base, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_u16mf4x4(uint16_t *rs1, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_u16mf2x4(uint16_t *base, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, 
vl); +void test_vsseg4e16_v_u16mf2x4(uint16_t *rs1, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_u16m1x4(uint16_t *base, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_u16m1x4(uint16_t *rs1, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_u16m2x4(uint16_t *base, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(base, v_tuple, vl); +void test_vsseg4e16_v_u16m2x4(uint16_t *rs1, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); } -void test_vsseg4e16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_f16m1x4_m(vbool16_t mask, float16_t *base, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_f16m2x4_m(vbool8_t mask, float16_t *base, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x4_t vs3, 
size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } -void test_vsseg4e16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e16(mask, base, v_tuple, vl); +void test_vsseg4e16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg4e32.c b/auto-generated/gnu-overloaded-tests/vsseg4e32.c index 89fc32077..dc7aa5cbe 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg4e32.c +++ b/auto-generated/gnu-overloaded-tests/vsseg4e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e32_v_f32mf2x4(float32_t *base, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_f32mf2x4(float32_t *rs1, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_f32m1x4(float32_t *base, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_f32m1x4(float32_t *rs1, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_f32m2x4(float32_t *base, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_f32m2x4(float32_t *rs1, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_i32mf2x4(int32_t *base, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_i32mf2x4(int32_t *rs1, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_i32m1x4(int32_t *base, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_i32m1x4(int32_t *rs1, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_i32m2x4(int32_t *base, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_i32m2x4(int32_t *rs1, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_u32mf2x4(uint32_t *base, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_u32mf2x4(uint32_t *rs1, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_u32m1x4(uint32_t *base, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_u32m1x4(uint32_t *rs1, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_u32m2x4(uint32_t *base, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(base, v_tuple, vl); +void test_vsseg4e32_v_u32m2x4(uint32_t *rs1, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(rs1, vs3, vl); } -void test_vsseg4e32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vfloat32mf2x4_t v_tuple, size_t vl) { - return 
__riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_f32m1x4_m(vbool32_t mask, float32_t *base, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_f32m2x4_m(vbool16_t mask, float32_t *base, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } -void test_vsseg4e32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e32(mask, base, v_tuple, vl); +void test_vsseg4e32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg4e64.c b/auto-generated/gnu-overloaded-tests/vsseg4e64.c index 4ccf3532f..599e51dab 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg4e64.c +++ b/auto-generated/gnu-overloaded-tests/vsseg4e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e64_v_f64m1x4(float64_t *base, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(base, v_tuple, vl); +void test_vsseg4e64_v_f64m1x4(float64_t *rs1, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(rs1, vs3, vl); } -void test_vsseg4e64_v_f64m2x4(float64_t *base, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(base, v_tuple, vl); +void test_vsseg4e64_v_f64m2x4(float64_t *rs1, vfloat64m2x4_t vs3, size_t vl) { + return 
__riscv_vsseg4e64(rs1, vs3, vl); } -void test_vsseg4e64_v_i64m1x4(int64_t *base, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(base, v_tuple, vl); +void test_vsseg4e64_v_i64m1x4(int64_t *rs1, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(rs1, vs3, vl); } -void test_vsseg4e64_v_i64m2x4(int64_t *base, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(base, v_tuple, vl); +void test_vsseg4e64_v_i64m2x4(int64_t *rs1, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(rs1, vs3, vl); } -void test_vsseg4e64_v_u64m1x4(uint64_t *base, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(base, v_tuple, vl); +void test_vsseg4e64_v_u64m1x4(uint64_t *rs1, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(rs1, vs3, vl); } -void test_vsseg4e64_v_u64m2x4(uint64_t *base, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(base, v_tuple, vl); +void test_vsseg4e64_v_u64m2x4(uint64_t *rs1, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(rs1, vs3, vl); } -void test_vsseg4e64_v_f64m1x4_m(vbool64_t mask, float64_t *base, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(mask, base, v_tuple, vl); +void test_vsseg4e64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_f64m2x4_m(vbool32_t mask, float64_t *base, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(mask, base, v_tuple, vl); +void test_vsseg4e64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(mask, base, v_tuple, vl); +void test_vsseg4e64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(mask, base, v_tuple, vl); +void test_vsseg4e64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(mask, base, v_tuple, vl); +void test_vsseg4e64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(vm, rs1, vs3, vl); } -void test_vsseg4e64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e64(mask, base, v_tuple, vl); +void test_vsseg4e64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e64(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg4e8.c b/auto-generated/gnu-overloaded-tests/vsseg4e8.c index b34b64828..59265ad4a 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg4e8.c +++ b/auto-generated/gnu-overloaded-tests/vsseg4e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg4e8_v_i8mf8x4(int8_t *base, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_i8mf8x4(int8_t *rs1, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } 
-void test_vsseg4e8_v_i8mf4x4(int8_t *base, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_i8mf4x4(int8_t *rs1, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf2x4(int8_t *base, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_i8mf2x4(int8_t *rs1, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_i8m1x4(int8_t *base, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_i8m1x4(int8_t *rs1, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_i8m2x4(int8_t *base, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_i8m2x4(int8_t *rs1, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf8x4(uint8_t *base, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_u8mf8x4(uint8_t *rs1, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf4x4(uint8_t *base, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_u8mf4x4(uint8_t *rs1, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf2x4(uint8_t *base, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_u8mf2x4(uint8_t *rs1, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_u8m1x4(uint8_t *base, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_u8m1x4(uint8_t *rs1, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_u8m2x4(uint8_t *base, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(base, v_tuple, vl); +void test_vsseg4e8_v_u8m2x4(uint8_t *rs1, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vint8m2x4_t vs3, size_t vl) { + 
return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } -void test_vsseg4e8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsseg4e8(mask, base, v_tuple, vl); +void test_vsseg4e8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e8(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg4e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg5e16.c b/auto-generated/gnu-overloaded-tests/vsseg5e16.c index 983ee2c7c..b14594056 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg5e16.c +++ b/auto-generated/gnu-overloaded-tests/vsseg5e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg5e16_v_f16mf4x5(float16_t *base, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_f16mf4x5(float16_t *rs1, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_f16mf2x5(float16_t *base, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_f16mf2x5(float16_t *rs1, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_f16m1x5(float16_t *base, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_f16m1x5(float16_t *rs1, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf4x5(int16_t *base, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_i16mf4x5(int16_t *rs1, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf2x5(int16_t *base, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_i16mf2x5(int16_t *rs1, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_i16m1x5(int16_t *base, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_i16m1x5(int16_t *rs1, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void 
test_vsseg5e16_v_u16mf4x5(uint16_t *base, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_u16mf4x5(uint16_t *rs1, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_u16mf2x5(uint16_t *base, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_u16mf2x5(uint16_t *rs1, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_u16m1x5(uint16_t *base, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(base, v_tuple, vl); +void test_vsseg5e16_v_u16m1x5(uint16_t *rs1, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); } -void test_vsseg5e16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_f16m1x5_m(vbool16_t mask, float16_t *base, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } -void test_vsseg5e16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e16(mask, base, v_tuple, vl); +void test_vsseg5e16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg5e32.c b/auto-generated/gnu-overloaded-tests/vsseg5e32.c index 4ac1fa2ca..2b8e75d89 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg5e32.c +++ b/auto-generated/gnu-overloaded-tests/vsseg5e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg5e32_v_f32mf2x5(float32_t *base, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(base, v_tuple, vl); +void test_vsseg5e32_v_f32mf2x5(float32_t *rs1, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(rs1, vs3, vl); } -void test_vsseg5e32_v_f32m1x5(float32_t *base, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(base, v_tuple, vl); +void test_vsseg5e32_v_f32m1x5(float32_t *rs1, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(rs1, vs3, vl); } -void test_vsseg5e32_v_i32mf2x5(int32_t *base, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(base, v_tuple, vl); +void test_vsseg5e32_v_i32mf2x5(int32_t *rs1, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(rs1, vs3, vl); } -void test_vsseg5e32_v_i32m1x5(int32_t *base, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(base, v_tuple, vl); +void test_vsseg5e32_v_i32m1x5(int32_t *rs1, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(rs1, vs3, vl); } -void test_vsseg5e32_v_u32mf2x5(uint32_t *base, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(base, v_tuple, vl); +void test_vsseg5e32_v_u32mf2x5(uint32_t *rs1, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(rs1, vs3, vl); } -void test_vsseg5e32_v_u32m1x5(uint32_t *base, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(base, v_tuple, vl); +void test_vsseg5e32_v_u32m1x5(uint32_t *rs1, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(rs1, vs3, vl); } -void test_vsseg5e32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(mask, base, v_tuple, vl); +void test_vsseg5e32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_f32m1x5_m(vbool32_t mask, float32_t *base, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(mask, base, v_tuple, vl); +void test_vsseg5e32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(mask, base, v_tuple, vl); +void test_vsseg5e32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(mask, base, v_tuple, vl); +void test_vsseg5e32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(mask, base, v_tuple, vl); +void test_vsseg5e32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(vm, rs1, vs3, vl); } -void test_vsseg5e32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, 
vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e32(mask, base, v_tuple, vl); +void test_vsseg5e32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg5e64.c b/auto-generated/gnu-overloaded-tests/vsseg5e64.c index adadfa82a..14be29bab 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg5e64.c +++ b/auto-generated/gnu-overloaded-tests/vsseg5e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg5e64_v_f64m1x5(float64_t *base, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64(base, v_tuple, vl); +void test_vsseg5e64_v_f64m1x5(float64_t *rs1, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64(rs1, vs3, vl); } -void test_vsseg5e64_v_i64m1x5(int64_t *base, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64(base, v_tuple, vl); +void test_vsseg5e64_v_i64m1x5(int64_t *rs1, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64(rs1, vs3, vl); } -void test_vsseg5e64_v_u64m1x5(uint64_t *base, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64(base, v_tuple, vl); +void test_vsseg5e64_v_u64m1x5(uint64_t *rs1, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64(rs1, vs3, vl); } -void test_vsseg5e64_v_f64m1x5_m(vbool64_t mask, float64_t *base, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64(mask, base, v_tuple, vl); +void test_vsseg5e64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64(vm, rs1, vs3, vl); } -void test_vsseg5e64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64(mask, base, v_tuple, vl); +void test_vsseg5e64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64(vm, rs1, vs3, vl); } -void test_vsseg5e64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e64(mask, base, v_tuple, vl); +void test_vsseg5e64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e64(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg5e8.c b/auto-generated/gnu-overloaded-tests/vsseg5e8.c index ba782721d..5658554bf 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg5e8.c +++ b/auto-generated/gnu-overloaded-tests/vsseg5e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg5e8_v_i8mf8x5(int8_t *base, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_i8mf8x5(int8_t *rs1, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf4x5(int8_t *base, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_i8mf4x5(int8_t *rs1, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf2x5(int8_t *base, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_i8mf2x5(int8_t *rs1, vint8mf2x5_t vs3, 
size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_i8m1x5(int8_t *base, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_i8m1x5(int8_t *rs1, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf8x5(uint8_t *base, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_u8mf8x5(uint8_t *rs1, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf4x5(uint8_t *base, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_u8mf4x5(uint8_t *rs1, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf2x5(uint8_t *base, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_u8mf2x5(uint8_t *rs1, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_u8m1x5(uint8_t *base, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(base, v_tuple, vl); +void test_vsseg5e8_v_u8m1x5(uint8_t *rs1, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, vs3, vl); } -void test_vsseg5e8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsseg5e8(mask, base, v_tuple, vl); +void test_vsseg5e8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e8(vm, rs1, 
vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg5e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg6e16.c b/auto-generated/gnu-overloaded-tests/vsseg6e16.c index a2c21a7cc..f13d31664 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg6e16.c +++ b/auto-generated/gnu-overloaded-tests/vsseg6e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e16_v_f16mf4x6(float16_t *base, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_f16mf4x6(float16_t *rs1, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_f16mf2x6(float16_t *base, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_f16mf2x6(float16_t *rs1, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_f16m1x6(float16_t *base, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_f16m1x6(float16_t *rs1, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf4x6(int16_t *base, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_i16mf4x6(int16_t *rs1, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf2x6(int16_t *base, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_i16mf2x6(int16_t *rs1, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_i16m1x6(int16_t *base, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_i16m1x6(int16_t *rs1, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf4x6(uint16_t *base, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_u16mf4x6(uint16_t *rs1, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf2x6(uint16_t *base, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_u16mf2x6(uint16_t *rs1, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_u16m1x6(uint16_t *base, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(base, v_tuple, vl); +void test_vsseg6e16_v_u16m1x6(uint16_t *rs1, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); } -void test_vsseg6e16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_f16m1x6_m(vbool16_t mask, float16_t *base, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, 
base, v_tuple, vl); +void test_vsseg6e16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } -void test_vsseg6e16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e16(mask, base, v_tuple, vl); +void test_vsseg6e16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg6e32.c b/auto-generated/gnu-overloaded-tests/vsseg6e32.c index 66d453be6..1e0c56152 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg6e32.c +++ b/auto-generated/gnu-overloaded-tests/vsseg6e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e32_v_f32mf2x6(float32_t *base, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(base, v_tuple, vl); +void test_vsseg6e32_v_f32mf2x6(float32_t *rs1, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(rs1, vs3, vl); } -void test_vsseg6e32_v_f32m1x6(float32_t *base, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(base, v_tuple, vl); +void test_vsseg6e32_v_f32m1x6(float32_t *rs1, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(rs1, vs3, vl); } -void test_vsseg6e32_v_i32mf2x6(int32_t *base, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(base, v_tuple, vl); +void test_vsseg6e32_v_i32mf2x6(int32_t *rs1, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(rs1, vs3, vl); } -void test_vsseg6e32_v_i32m1x6(int32_t *base, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(base, v_tuple, vl); +void test_vsseg6e32_v_i32m1x6(int32_t *rs1, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(rs1, vs3, vl); } -void test_vsseg6e32_v_u32mf2x6(uint32_t *base, vuint32mf2x6_t v_tuple, size_t vl) { - 
return __riscv_vsseg6e32(base, v_tuple, vl); +void test_vsseg6e32_v_u32mf2x6(uint32_t *rs1, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(rs1, vs3, vl); } -void test_vsseg6e32_v_u32m1x6(uint32_t *base, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(base, v_tuple, vl); +void test_vsseg6e32_v_u32m1x6(uint32_t *rs1, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(rs1, vs3, vl); } -void test_vsseg6e32_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(mask, base, v_tuple, vl); +void test_vsseg6e32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_f32m1x6_m(vbool32_t mask, float32_t *base, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(mask, base, v_tuple, vl); +void test_vsseg6e32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(mask, base, v_tuple, vl); +void test_vsseg6e32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(mask, base, v_tuple, vl); +void test_vsseg6e32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(mask, base, v_tuple, vl); +void test_vsseg6e32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(vm, rs1, vs3, vl); } -void test_vsseg6e32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e32(mask, base, v_tuple, vl); +void test_vsseg6e32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg6e64.c b/auto-generated/gnu-overloaded-tests/vsseg6e64.c index 95d093ac5..eb931ec08 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg6e64.c +++ b/auto-generated/gnu-overloaded-tests/vsseg6e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e64_v_f64m1x6(float64_t *base, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64(base, v_tuple, vl); +void test_vsseg6e64_v_f64m1x6(float64_t *rs1, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64(rs1, vs3, vl); } -void test_vsseg6e64_v_i64m1x6(int64_t *base, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64(base, v_tuple, vl); +void test_vsseg6e64_v_i64m1x6(int64_t *rs1, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64(rs1, vs3, vl); } -void test_vsseg6e64_v_u64m1x6(uint64_t *base, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64(base, v_tuple, vl); +void test_vsseg6e64_v_u64m1x6(uint64_t *rs1, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64(rs1, vs3, vl); } -void test_vsseg6e64_v_f64m1x6_m(vbool64_t mask, float64_t *base, vfloat64m1x6_t 
v_tuple, size_t vl) { - return __riscv_vsseg6e64(mask, base, v_tuple, vl); +void test_vsseg6e64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64(vm, rs1, vs3, vl); } -void test_vsseg6e64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64(mask, base, v_tuple, vl); +void test_vsseg6e64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64(vm, rs1, vs3, vl); } -void test_vsseg6e64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e64(mask, base, v_tuple, vl); +void test_vsseg6e64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e64(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg6e8.c b/auto-generated/gnu-overloaded-tests/vsseg6e8.c index 0cf001778..aadeda80f 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg6e8.c +++ b/auto-generated/gnu-overloaded-tests/vsseg6e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg6e8_v_i8mf8x6(int8_t *base, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_i8mf8x6(int8_t *rs1, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf4x6(int8_t *base, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_i8mf4x6(int8_t *rs1, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf2x6(int8_t *base, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_i8mf2x6(int8_t *rs1, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_i8m1x6(int8_t *base, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_i8m1x6(int8_t *rs1, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf8x6(uint8_t *base, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_u8mf8x6(uint8_t *rs1, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf4x6(uint8_t *base, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_u8mf4x6(uint8_t *rs1, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf2x6(uint8_t *base, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_u8mf2x6(uint8_t *rs1, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_u8m1x6(uint8_t *base, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(base, v_tuple, vl); +void test_vsseg6e8_v_u8m1x6(uint8_t *rs1, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } -void 
test_vsseg6e8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } -void test_vsseg6e8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsseg6e8(mask, base, v_tuple, vl); +void test_vsseg6e8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e8(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg6e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg7e16.c b/auto-generated/gnu-overloaded-tests/vsseg7e16.c index 483ff9dd7..5b478f5af 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg7e16.c +++ b/auto-generated/gnu-overloaded-tests/vsseg7e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e16_v_f16mf4x7(float16_t *base, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_f16mf4x7(float16_t *rs1, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_f16mf2x7(float16_t *base, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_f16mf2x7(float16_t *rs1, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_f16m1x7(float16_t *base, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_f16m1x7(float16_t *rs1, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_i16mf4x7(int16_t *base, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_i16mf4x7(int16_t *rs1, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void 
test_vsseg7e16_v_i16mf2x7(int16_t *base, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_i16mf2x7(int16_t *rs1, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_i16m1x7(int16_t *base, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_i16m1x7(int16_t *rs1, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf4x7(uint16_t *base, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_u16mf4x7(uint16_t *rs1, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf2x7(uint16_t *base, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_u16mf2x7(uint16_t *rs1, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_u16m1x7(uint16_t *base, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(base, v_tuple, vl); +void test_vsseg7e16_v_u16m1x7(uint16_t *rs1, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); } -void test_vsseg7e16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_f16m1x7_m(vbool16_t mask, float16_t *base, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_u16mf2x7_m(vbool32_t vm, 
uint16_t *rs1, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } -void test_vsseg7e16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e16(mask, base, v_tuple, vl); +void test_vsseg7e16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg7e32.c b/auto-generated/gnu-overloaded-tests/vsseg7e32.c index e912c096f..c2d96b1ec 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg7e32.c +++ b/auto-generated/gnu-overloaded-tests/vsseg7e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e32_v_f32mf2x7(float32_t *base, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(base, v_tuple, vl); +void test_vsseg7e32_v_f32mf2x7(float32_t *rs1, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(rs1, vs3, vl); } -void test_vsseg7e32_v_f32m1x7(float32_t *base, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(base, v_tuple, vl); +void test_vsseg7e32_v_f32m1x7(float32_t *rs1, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(rs1, vs3, vl); } -void test_vsseg7e32_v_i32mf2x7(int32_t *base, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(base, v_tuple, vl); +void test_vsseg7e32_v_i32mf2x7(int32_t *rs1, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(rs1, vs3, vl); } -void test_vsseg7e32_v_i32m1x7(int32_t *base, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(base, v_tuple, vl); +void test_vsseg7e32_v_i32m1x7(int32_t *rs1, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(rs1, vs3, vl); } -void test_vsseg7e32_v_u32mf2x7(uint32_t *base, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(base, v_tuple, vl); +void test_vsseg7e32_v_u32mf2x7(uint32_t *rs1, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(rs1, vs3, vl); } -void test_vsseg7e32_v_u32m1x7(uint32_t *base, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(base, v_tuple, vl); +void test_vsseg7e32_v_u32m1x7(uint32_t *rs1, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(rs1, vs3, vl); } -void test_vsseg7e32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(mask, base, v_tuple, vl); +void test_vsseg7e32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_f32m1x7_m(vbool32_t mask, float32_t *base, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(mask, base, v_tuple, vl); +void test_vsseg7e32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(mask, base, v_tuple, vl); +void test_vsseg7e32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(mask, base, v_tuple, vl); +void test_vsseg7e32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vint32m1x7_t vs3, 
size_t vl) { + return __riscv_vsseg7e32(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(mask, base, v_tuple, vl); +void test_vsseg7e32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(vm, rs1, vs3, vl); } -void test_vsseg7e32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e32(mask, base, v_tuple, vl); +void test_vsseg7e32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg7e64.c b/auto-generated/gnu-overloaded-tests/vsseg7e64.c index df0c8f120..247e00e92 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg7e64.c +++ b/auto-generated/gnu-overloaded-tests/vsseg7e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e64_v_f64m1x7(float64_t *base, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64(base, v_tuple, vl); +void test_vsseg7e64_v_f64m1x7(float64_t *rs1, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64(rs1, vs3, vl); } -void test_vsseg7e64_v_i64m1x7(int64_t *base, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64(base, v_tuple, vl); +void test_vsseg7e64_v_i64m1x7(int64_t *rs1, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64(rs1, vs3, vl); } -void test_vsseg7e64_v_u64m1x7(uint64_t *base, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64(base, v_tuple, vl); +void test_vsseg7e64_v_u64m1x7(uint64_t *rs1, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64(rs1, vs3, vl); } -void test_vsseg7e64_v_f64m1x7_m(vbool64_t mask, float64_t *base, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64(mask, base, v_tuple, vl); +void test_vsseg7e64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64(vm, rs1, vs3, vl); } -void test_vsseg7e64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64(mask, base, v_tuple, vl); +void test_vsseg7e64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64(vm, rs1, vs3, vl); } -void test_vsseg7e64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e64(mask, base, v_tuple, vl); +void test_vsseg7e64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e64(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg7e8.c b/auto-generated/gnu-overloaded-tests/vsseg7e8.c index 0201a04a9..db95b6e9a 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg7e8.c +++ b/auto-generated/gnu-overloaded-tests/vsseg7e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg7e8_v_i8mf8x7(int8_t *base, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_i8mf8x7(int8_t *rs1, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void 
test_vsseg7e8_v_i8mf4x7(int8_t *base, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_i8mf4x7(int8_t *rs1, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf2x7(int8_t *base, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_i8mf2x7(int8_t *rs1, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void test_vsseg7e8_v_i8m1x7(int8_t *base, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_i8m1x7(int8_t *rs1, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf8x7(uint8_t *base, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_u8mf8x7(uint8_t *rs1, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf4x7(uint8_t *base, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_u8mf4x7(uint8_t *rs1, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf2x7(uint8_t *base, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_u8mf2x7(uint8_t *rs1, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void test_vsseg7e8_v_u8m1x7(uint8_t *base, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(base, v_tuple, vl); +void test_vsseg7e8_v_u8m1x7(uint8_t *rs1, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, v_tuple, vl); +void test_vsseg7e8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, v_tuple, vl); +void test_vsseg7e8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, v_tuple, vl); +void test_vsseg7e8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, 
v_tuple, vl); +void test_vsseg7e8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } -void test_vsseg7e8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsseg7e8(mask, base, v_tuple, vl); +void test_vsseg7e8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e8(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg7e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg8e16.c b/auto-generated/gnu-overloaded-tests/vsseg8e16.c index 7411cbaa5..08747dbed 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg8e16.c +++ b/auto-generated/gnu-overloaded-tests/vsseg8e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e16_v_f16mf4x8(float16_t *base, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_f16mf4x8(float16_t *rs1, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_f16mf2x8(float16_t *base, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_f16mf2x8(float16_t *rs1, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_f16m1x8(float16_t *base, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_f16m1x8(float16_t *rs1, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf4x8(int16_t *base, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_i16mf4x8(int16_t *rs1, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf2x8(int16_t *base, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_i16mf2x8(int16_t *rs1, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_i16m1x8(int16_t *base, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_i16m1x8(int16_t *rs1, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf4x8(uint16_t *base, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_u16mf4x8(uint16_t *rs1, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf2x8(uint16_t *base, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_u16mf2x8(uint16_t *rs1, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_u16m1x8(uint16_t *base, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(base, v_tuple, vl); +void test_vsseg8e16_v_u16m1x8(uint16_t *rs1, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); } -void test_vsseg8e16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void 
test_vsseg8e16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_f16m1x8_m(vbool16_t mask, float16_t *base, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } -void test_vsseg8e16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e16(mask, base, v_tuple, vl); +void test_vsseg8e16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg8e32.c b/auto-generated/gnu-overloaded-tests/vsseg8e32.c index 4fdd1fc1d..32ee45ef1 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg8e32.c +++ b/auto-generated/gnu-overloaded-tests/vsseg8e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e32_v_f32mf2x8(float32_t *base, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(base, v_tuple, vl); +void test_vsseg8e32_v_f32mf2x8(float32_t *rs1, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(rs1, vs3, vl); } -void test_vsseg8e32_v_f32m1x8(float32_t *base, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(base, v_tuple, vl); +void test_vsseg8e32_v_f32m1x8(float32_t *rs1, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(rs1, vs3, vl); } -void test_vsseg8e32_v_i32mf2x8(int32_t *base, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(base, v_tuple, vl); +void 
test_vsseg8e32_v_i32mf2x8(int32_t *rs1, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(rs1, vs3, vl); } -void test_vsseg8e32_v_i32m1x8(int32_t *base, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(base, v_tuple, vl); +void test_vsseg8e32_v_i32m1x8(int32_t *rs1, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(rs1, vs3, vl); } -void test_vsseg8e32_v_u32mf2x8(uint32_t *base, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(base, v_tuple, vl); +void test_vsseg8e32_v_u32mf2x8(uint32_t *rs1, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(rs1, vs3, vl); } -void test_vsseg8e32_v_u32m1x8(uint32_t *base, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(base, v_tuple, vl); +void test_vsseg8e32_v_u32m1x8(uint32_t *rs1, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(rs1, vs3, vl); } -void test_vsseg8e32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(mask, base, v_tuple, vl); +void test_vsseg8e32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_f32m1x8_m(vbool32_t mask, float32_t *base, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(mask, base, v_tuple, vl); +void test_vsseg8e32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(mask, base, v_tuple, vl); +void test_vsseg8e32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(mask, base, v_tuple, vl); +void test_vsseg8e32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(mask, base, v_tuple, vl); +void test_vsseg8e32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(vm, rs1, vs3, vl); } -void test_vsseg8e32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e32(mask, base, v_tuple, vl); +void test_vsseg8e32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e32(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg8e64.c b/auto-generated/gnu-overloaded-tests/vsseg8e64.c index 818cfb782..354e88360 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg8e64.c +++ b/auto-generated/gnu-overloaded-tests/vsseg8e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e64_v_f64m1x8(float64_t *base, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64(base, v_tuple, vl); +void test_vsseg8e64_v_f64m1x8(float64_t *rs1, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64(rs1, vs3, vl); } -void test_vsseg8e64_v_i64m1x8(int64_t *base, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64(base, v_tuple, vl); +void 
test_vsseg8e64_v_i64m1x8(int64_t *rs1, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64(rs1, vs3, vl); } -void test_vsseg8e64_v_u64m1x8(uint64_t *base, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64(base, v_tuple, vl); +void test_vsseg8e64_v_u64m1x8(uint64_t *rs1, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64(rs1, vs3, vl); } -void test_vsseg8e64_v_f64m1x8_m(vbool64_t mask, float64_t *base, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64(mask, base, v_tuple, vl); +void test_vsseg8e64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64(vm, rs1, vs3, vl); } -void test_vsseg8e64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64(mask, base, v_tuple, vl); +void test_vsseg8e64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64(vm, rs1, vs3, vl); } -void test_vsseg8e64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e64(mask, base, v_tuple, vl); +void test_vsseg8e64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e64(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsseg8e8.c b/auto-generated/gnu-overloaded-tests/vsseg8e8.c index c7c9804f2..f6c7add81 100644 --- a/auto-generated/gnu-overloaded-tests/vsseg8e8.c +++ b/auto-generated/gnu-overloaded-tests/vsseg8e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsseg8e8_v_i8mf8x8(int8_t *base, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_i8mf8x8(int8_t *rs1, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf4x8(int8_t *base, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_i8mf4x8(int8_t *rs1, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf2x8(int8_t *base, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_i8mf2x8(int8_t *rs1, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8m1x8(int8_t *base, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_i8m1x8(int8_t *rs1, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf8x8(uint8_t *base, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_u8mf8x8(uint8_t *rs1, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf4x8(uint8_t *base, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_u8mf4x8(uint8_t *rs1, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf2x8(uint8_t *base, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_u8mf2x8(uint8_t *rs1, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_u8m1x8(uint8_t *base, vuint8m1x8_t v_tuple, size_t vl) { - return 
__riscv_vsseg8e8(base, v_tuple, vl); +void test_vsseg8e8_v_u8m1x8(uint8_t *rs1, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } -void test_vsseg8e8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsseg8e8(mask, base, v_tuple, vl); +void test_vsseg8e8_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e8(vm, rs1, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsseg8e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssra.c b/auto-generated/gnu-overloaded-tests/vssra.c index 622960e9f..aa1784966 100644 --- a/auto-generated/gnu-overloaded-tests/vssra.c +++ b/auto-generated/gnu-overloaded-tests/vssra.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { 
- return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1(vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2(vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4(vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8(vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8(vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t 
test_vssra_vx_i16mf4(vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1(vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2(vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4(vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8(vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8(vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t 
test_vssra_vv_i32m1(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1(vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2(vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4(vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8(vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8(vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1(vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1(vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2(vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2(vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4(vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4(vint64m4_t vs2, size_t rs1, size_t 
vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8(vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vx_i64m8(vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); 
+vint8m2_t test_vssra_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, 
vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t 
shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t 
test_vssra_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssra\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssrl.c b/auto-generated/gnu-overloaded-tests/vssrl.c index 91302c855..530937358 100644 --- a/auto-generated/gnu-overloaded-tests/vssrl.c +++ b/auto-generated/gnu-overloaded-tests/vssrl.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return 
__riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t 
vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl(vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, 
vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return 
__riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); 
+vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, 
vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl(vm, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl(mask, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl(vm, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssrl\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg2e16.c b/auto-generated/gnu-overloaded-tests/vssseg2e16.c index a19247788..14c802c97 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg2e16.c +++ b/auto-generated/gnu-overloaded-tests/vssseg2e16.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg2e16_v_f16mf4x2(float16_t *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16mf4x2(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16mf2x2(float16_t *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16mf2x2(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16m1x2(float16_t *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16m1x2(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16m2x2(float16_t *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16m2x2(float16_t *rs1, ptrdiff_t rs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16m4x2(float16_t *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, 
bstride, v_tuple, vl); +void test_vssseg2e16_v_f16m4x2(float16_t *rs1, ptrdiff_t rs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16mf4x2(int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16mf4x2(int16_t *rs1, ptrdiff_t rs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16mf2x2(int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16mf2x2(int16_t *rs1, ptrdiff_t rs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16m1x2(int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16m1x2(int16_t *rs1, ptrdiff_t rs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16m2x2(int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16m2x2(int16_t *rs1, ptrdiff_t rs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16m4x2(int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16m4x2(int16_t *rs1, ptrdiff_t rs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16mf4x2(uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16mf4x2(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16mf2x2(uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16mf2x2(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16m1x2(uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16m1x2(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16m2x2(uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16m2x2(uint16_t *rs1, ptrdiff_t rs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16m4x2(uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16m4x2(uint16_t *rs1, ptrdiff_t rs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, 
vl); } -void test_vssseg2e16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16m1x2_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16m2x2_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_f16m4x2_m(vbool4_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16m1x2_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16m2x2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_i16m4x2_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16mf2x2_m(vbool32_t mask, uint16_t 
*base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e16(mask, base, bstride, v_tuple, vl); +void test_vssseg2e16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e16\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg2e32.c b/auto-generated/gnu-overloaded-tests/vssseg2e32.c index 27c0895d8..e873e1722 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg2e32.c +++ b/auto-generated/gnu-overloaded-tests/vssseg2e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg2e32_v_f32mf2x2(float32_t *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32mf2x2(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m1x2(float32_t *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m1x2(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m2x2(float32_t *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m2x2(float32_t *rs1, ptrdiff_t rs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m4x2(float32_t *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m4x2(float32_t *rs1, ptrdiff_t rs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32mf2x2(int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32mf2x2(int32_t *rs1, ptrdiff_t rs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m1x2(int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void 
test_vssseg2e32_v_i32m1x2(int32_t *rs1, ptrdiff_t rs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m2x2(int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m2x2(int32_t *rs1, ptrdiff_t rs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m4x2(int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m4x2(int32_t *rs1, ptrdiff_t rs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32mf2x2(uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32mf2x2(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m1x2(uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m1x2(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m2x2(uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m2x2(uint32_t *rs1, ptrdiff_t rs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m4x2(uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m4x2(uint32_t *rs1, ptrdiff_t rs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m1x2_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m2x2_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_f32m4x2_m(vbool8_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); 
+void test_vssseg2e32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m1x2_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m2x2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_i32m4x2_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e32(mask, base, bstride, v_tuple, vl); +void test_vssseg2e32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg2e64.c b/auto-generated/gnu-overloaded-tests/vssseg2e64.c index bea4be441..5cbff98c8 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg2e64.c +++ b/auto-generated/gnu-overloaded-tests/vssseg2e64.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg2e64_v_f64m1x2(float64_t *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m1x2(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m2x2(float64_t *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, 
v_tuple, vl); +void test_vssseg2e64_v_f64m2x2(float64_t *rs1, ptrdiff_t rs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m4x2(float64_t *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m4x2(float64_t *rs1, ptrdiff_t rs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m1x2(int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m1x2(int64_t *rs1, ptrdiff_t rs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m2x2(int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m2x2(int64_t *rs1, ptrdiff_t rs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m4x2(int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m4x2(int64_t *rs1, ptrdiff_t rs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m1x2(uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m1x2(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m2x2(uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m2x2(uint64_t *rs1, ptrdiff_t rs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m4x2(uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m4x2(uint64_t *rs1, ptrdiff_t rs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m1x2_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m2x2_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_f64m4x2_m(vbool16_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m1x2_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void 
test_vssseg2e64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m2x2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_i64m4x2_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e64(mask, base, bstride, v_tuple, vl); +void test_vssseg2e64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e64\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg2e8.c b/auto-generated/gnu-overloaded-tests/vssseg2e8.c index 9b9bd070a..bccbb8f49 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg2e8.c +++ b/auto-generated/gnu-overloaded-tests/vssseg2e8.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg2e8_v_i8mf8x2(int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf8x2(int8_t *rs1, ptrdiff_t rs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf4x2(int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf4x2(int8_t *rs1, ptrdiff_t rs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf2x2(int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf2x2(int8_t *rs1, ptrdiff_t rs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m1x2(int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m1x2(int8_t *rs1, ptrdiff_t rs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, 
rs2, vs3, vl); } -void test_vssseg2e8_v_i8m2x2(int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m2x2(int8_t *rs1, ptrdiff_t rs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m4x2(int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m4x2(int8_t *rs1, ptrdiff_t rs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf8x2(uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf8x2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf4x2(uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf4x2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf2x2(uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf2x2(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m1x2(uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m1x2(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m2x2(uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m2x2(uint8_t *rs1, ptrdiff_t rs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m4x2(uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m4x2(uint8_t *rs1, ptrdiff_t rs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m1x2_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, 
v_tuple, vl); +void test_vssseg2e8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m2x2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_i8m4x2_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg2e8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vssseg2e8(mask, base, bstride, v_tuple, vl); +void test_vssseg2e8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e8(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg2e8\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg3e16.c b/auto-generated/gnu-overloaded-tests/vssseg3e16.c index 46aef7474..97a285f86 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg3e16.c +++ b/auto-generated/gnu-overloaded-tests/vssseg3e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg3e16_v_f16mf4x3(float16_t *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void 
test_vssseg3e16_v_f16mf4x3(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16mf2x3(float16_t *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16mf2x3(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m1x3(float16_t *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m1x3(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m2x3(float16_t *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m2x3(float16_t *rs1, ptrdiff_t rs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf4x3(int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf4x3(int16_t *rs1, ptrdiff_t rs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf2x3(int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf2x3(int16_t *rs1, ptrdiff_t rs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m1x3(int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m1x3(int16_t *rs1, ptrdiff_t rs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m2x3(int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m2x3(int16_t *rs1, ptrdiff_t rs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf4x3(uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16mf4x3(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf2x3(uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16mf2x3(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m1x3(uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m1x3(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m2x3(uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m2x3(uint16_t *rs1, ptrdiff_t rs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16mf4x3_m(vbool64_t mask, 
float16_t *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m1x3_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_f16m2x3_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m1x3_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_i16m2x3_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t 
v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e16(mask, base, bstride, v_tuple, vl); +void test_vssseg3e16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg3e32.c b/auto-generated/gnu-overloaded-tests/vssseg3e32.c index 76a6258c0..ef4069e35 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg3e32.c +++ b/auto-generated/gnu-overloaded-tests/vssseg3e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg3e32_v_f32mf2x3(float32_t *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32mf2x3(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m1x3(float32_t *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m1x3(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m2x3(float32_t *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m2x3(float32_t *rs1, ptrdiff_t rs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32mf2x3(int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32mf2x3(int32_t *rs1, ptrdiff_t rs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32m1x3(int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32m1x3(int32_t *rs1, ptrdiff_t rs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32m2x3(int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32m2x3(int32_t *rs1, ptrdiff_t rs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32mf2x3(uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32mf2x3(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32m1x3(uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32m1x3(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void 
test_vssseg3e32_v_u32m2x3(uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32m2x3(uint32_t *rs1, ptrdiff_t rs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m1x3_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_f32m2x3_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32m1x3_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_i32m2x3_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e32(mask, base, bstride, v_tuple, vl); +void test_vssseg3e32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg3e64.c b/auto-generated/gnu-overloaded-tests/vssseg3e64.c index 1ded1ccca..766debc1c 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg3e64.c +++ b/auto-generated/gnu-overloaded-tests/vssseg3e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg3e64_v_f64m1x3(float64_t *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v_tuple, vl); +void test_vssseg3e64_v_f64m1x3(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_f64m2x3(float64_t *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v_tuple, vl); +void test_vssseg3e64_v_f64m2x3(float64_t *rs1, ptrdiff_t rs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_i64m1x3(int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v_tuple, vl); +void test_vssseg3e64_v_i64m1x3(int64_t *rs1, ptrdiff_t rs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_i64m2x3(int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v_tuple, vl); +void test_vssseg3e64_v_i64m2x3(int64_t *rs1, ptrdiff_t rs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_u64m1x3(uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v_tuple, vl); +void test_vssseg3e64_v_u64m1x3(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_u64m2x3(uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(base, bstride, v_tuple, vl); +void test_vssseg3e64_v_u64m2x3(uint64_t *rs1, ptrdiff_t rs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_f64m1x3_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); +void test_vssseg3e64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_f64m2x3_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); +void test_vssseg3e64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_i64m1x3_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); +void test_vssseg3e64_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_i64m2x3_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); +void 
test_vssseg3e64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); +void test_vssseg3e64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e64(mask, base, bstride, v_tuple, vl); +void test_vssseg3e64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg3e8.c b/auto-generated/gnu-overloaded-tests/vssseg3e8.c index a7ceae5a6..338769bd8 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg3e8.c +++ b/auto-generated/gnu-overloaded-tests/vssseg3e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg3e8_v_i8mf8x3(int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf8x3(int8_t *rs1, ptrdiff_t rs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf4x3(int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf4x3(int8_t *rs1, ptrdiff_t rs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf2x3(int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf2x3(int8_t *rs1, ptrdiff_t rs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8m1x3(int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8m1x3(int8_t *rs1, ptrdiff_t rs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8m2x3(int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8m2x3(int8_t *rs1, ptrdiff_t rs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf8x3(uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf8x3(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf4x3(uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf4x3(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf2x3(uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, 
bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf2x3(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8m1x3(uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8m1x3(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8m2x3(uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8m2x3(uint8_t *rs1, ptrdiff_t rs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8m1x3_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_i8m2x3_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t 
v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg3e8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vssseg3e8(mask, base, bstride, v_tuple, vl); +void test_vssseg3e8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e8(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg3e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg4e16.c b/auto-generated/gnu-overloaded-tests/vssseg4e16.c index 90c098bbc..c7818080b 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg4e16.c +++ b/auto-generated/gnu-overloaded-tests/vssseg4e16.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg4e16_v_f16mf4x4(float16_t *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16mf4x4(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_f16mf2x4(float16_t *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16mf2x4(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_f16m1x4(float16_t *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16m1x4(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_f16m2x4(float16_t *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16m2x4(float16_t *rs1, ptrdiff_t rs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16mf4x4(int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16mf4x4(int16_t *rs1, ptrdiff_t rs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16mf2x4(int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16mf2x4(int16_t *rs1, ptrdiff_t rs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16m1x4(int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16m1x4(int16_t *rs1, ptrdiff_t rs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16m2x4(int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16m2x4(int16_t *rs1, ptrdiff_t rs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void 
test_vssseg4e16_v_u16mf4x4(uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16mf4x4(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_u16mf2x4(uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16mf2x4(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_u16m1x4(uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16m1x4(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_u16m2x4(uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16m2x4(uint16_t *rs1, ptrdiff_t rs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_f16m1x4_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_f16m2x4_m(vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16m1x4_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16m1x4_m(vbool16_t 
vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_i16m2x4_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e16(mask, base, bstride, v_tuple, vl); +void test_vssseg4e16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e16\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg4e32.c b/auto-generated/gnu-overloaded-tests/vssseg4e32.c index db03c8eae..8cdc61a39 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg4e32.c +++ b/auto-generated/gnu-overloaded-tests/vssseg4e32.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg4e32_v_f32mf2x4(float32_t *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_f32mf2x4(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_f32m1x4(float32_t *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_f32m1x4(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_f32m2x4(float32_t *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_f32m2x4(float32_t *rs1, ptrdiff_t rs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_i32mf2x4(int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_i32mf2x4(int32_t *rs1, ptrdiff_t rs2, vint32mf2x4_t vs3, size_t vl) { + 
return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_i32m1x4(int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_i32m1x4(int32_t *rs1, ptrdiff_t rs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_i32m2x4(int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_i32m2x4(int32_t *rs1, ptrdiff_t rs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_u32mf2x4(uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_u32mf2x4(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_u32m1x4(uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_u32m1x4(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_u32m2x4(uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(base, bstride, v_tuple, vl); +void test_vssseg4e32_v_u32m2x4(uint32_t *rs1, ptrdiff_t rs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_f32m1x4_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_f32m2x4_m(vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_i32m1x4_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_i32m2x4_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_i32m2x4_m(vbool16_t vm, int32_t 
*rs1, ptrdiff_t rs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e32(mask, base, bstride, v_tuple, vl); +void test_vssseg4e32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e32\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg4e64.c b/auto-generated/gnu-overloaded-tests/vssseg4e64.c index c70bdeb22..aaacb26a7 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg4e64.c +++ b/auto-generated/gnu-overloaded-tests/vssseg4e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg4e64_v_f64m1x4(float64_t *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v_tuple, vl); +void test_vssseg4e64_v_f64m1x4(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_f64m2x4(float64_t *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v_tuple, vl); +void test_vssseg4e64_v_f64m2x4(float64_t *rs1, ptrdiff_t rs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_i64m1x4(int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v_tuple, vl); +void test_vssseg4e64_v_i64m1x4(int64_t *rs1, ptrdiff_t rs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_i64m2x4(int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v_tuple, vl); +void test_vssseg4e64_v_i64m2x4(int64_t *rs1, ptrdiff_t rs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_u64m1x4(uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v_tuple, vl); +void test_vssseg4e64_v_u64m1x4(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_u64m2x4(uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(base, bstride, v_tuple, vl); +void test_vssseg4e64_v_u64m2x4(uint64_t *rs1, ptrdiff_t rs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_f64m1x4_m(vbool64_t mask, float64_t *base, 
ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); +void test_vssseg4e64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_f64m2x4_m(vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); +void test_vssseg4e64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_i64m1x4_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); +void test_vssseg4e64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_i64m2x4_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); +void test_vssseg4e64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); +void test_vssseg4e64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e64(mask, base, bstride, v_tuple, vl); +void test_vssseg4e64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg4e8.c b/auto-generated/gnu-overloaded-tests/vssseg4e8.c index 4808ed5d0..3e6b8be6b 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg4e8.c +++ b/auto-generated/gnu-overloaded-tests/vssseg4e8.c @@ -6,84 +6,84 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg4e8_v_i8mf8x4(int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8mf8x4(int8_t *rs1, ptrdiff_t rs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8mf4x4(int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8mf4x4(int8_t *rs1, ptrdiff_t rs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8mf2x4(int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8mf2x4(int8_t *rs1, ptrdiff_t rs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8m1x4(int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, 
v_tuple, vl); +void test_vssseg4e8_v_i8m1x4(int8_t *rs1, ptrdiff_t rs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8m2x4(int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8m2x4(int8_t *rs1, ptrdiff_t rs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8mf8x4(uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8mf8x4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8mf4x4(uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8mf4x4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8mf2x4(uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8mf2x4(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8m1x4(uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8m1x4(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8m2x4(uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8m2x4(uint8_t *rs1, ptrdiff_t rs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8m1x4_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_i8m2x4_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m2x4_t vs3, size_t vl) { + 
return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg4e8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vssseg4e8(mask, base, bstride, v_tuple, vl); +void test_vssseg4e8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e8(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg4e8\.[ivxfswum.]+\s+} 20 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg5e16.c b/auto-generated/gnu-overloaded-tests/vssseg5e16.c index c975dea36..dee9e386a 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg5e16.c +++ b/auto-generated/gnu-overloaded-tests/vssseg5e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg5e16_v_f16mf4x5(float16_t *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_f16mf4x5(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_f16mf2x5(float16_t *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_f16mf2x5(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_f16m1x5(float16_t *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_f16m1x5(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_i16mf4x5(int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_i16mf4x5(int16_t *rs1, ptrdiff_t rs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_i16mf2x5(int16_t *base, ptrdiff_t 
bstride, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_i16mf2x5(int16_t *rs1, ptrdiff_t rs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_i16m1x5(int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_i16m1x5(int16_t *rs1, ptrdiff_t rs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_u16mf4x5(uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_u16mf4x5(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_u16mf2x5(uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_u16mf2x5(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_u16m1x5(uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(base, bstride, v_tuple, vl); +void test_vssseg5e16_v_u16m1x5(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_f16m1x5_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_i16m1x5_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, 
rs2, vs3, vl); } -void test_vssseg5e16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e16(mask, base, bstride, v_tuple, vl); +void test_vssseg5e16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg5e32.c b/auto-generated/gnu-overloaded-tests/vssseg5e32.c index 2b5c6cc43..dfdb55305 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg5e32.c +++ b/auto-generated/gnu-overloaded-tests/vssseg5e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg5e32_v_f32mf2x5(float32_t *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v_tuple, vl); +void test_vssseg5e32_v_f32mf2x5(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_f32m1x5(float32_t *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v_tuple, vl); +void test_vssseg5e32_v_f32m1x5(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_i32mf2x5(int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v_tuple, vl); +void test_vssseg5e32_v_i32mf2x5(int32_t *rs1, ptrdiff_t rs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_i32m1x5(int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v_tuple, vl); +void test_vssseg5e32_v_i32m1x5(int32_t *rs1, ptrdiff_t rs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_u32mf2x5(uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v_tuple, vl); +void test_vssseg5e32_v_u32mf2x5(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_u32m1x5(uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(base, bstride, v_tuple, vl); +void test_vssseg5e32_v_u32m1x5(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl) { - return 
__riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); +void test_vssseg5e32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_f32m1x5_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); +void test_vssseg5e32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); +void test_vssseg5e32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_i32m1x5_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); +void test_vssseg5e32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); +void test_vssseg5e32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e32(mask, base, bstride, v_tuple, vl); +void test_vssseg5e32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg5e64.c b/auto-generated/gnu-overloaded-tests/vssseg5e64.c index 23ea20f20..ace8a37fa 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg5e64.c +++ b/auto-generated/gnu-overloaded-tests/vssseg5e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg5e64_v_f64m1x5(float64_t *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e64(base, bstride, v_tuple, vl); +void test_vssseg5e64_v_f64m1x5(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e64(rs1, rs2, vs3, vl); } -void test_vssseg5e64_v_i64m1x5(int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e64(base, bstride, v_tuple, vl); +void test_vssseg5e64_v_i64m1x5(int64_t *rs1, ptrdiff_t rs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e64(rs1, rs2, vs3, vl); } -void test_vssseg5e64_v_u64m1x5(uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e64(base, bstride, v_tuple, vl); +void test_vssseg5e64_v_u64m1x5(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e64(rs1, rs2, vs3, vl); } -void test_vssseg5e64_v_f64m1x5_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e64(mask, base, 
bstride, v_tuple, vl); +void test_vssseg5e64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e64_v_i64m1x5_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e64(mask, base, bstride, v_tuple, vl); +void test_vssseg5e64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e64(mask, base, bstride, v_tuple, vl); +void test_vssseg5e64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg5e8.c b/auto-generated/gnu-overloaded-tests/vssseg5e8.c index 2100f6655..631192f78 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg5e8.c +++ b/auto-generated/gnu-overloaded-tests/vssseg5e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg5e8_v_i8mf8x5(int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8mf8x5(int8_t *rs1, ptrdiff_t rs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_i8mf4x5(int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8mf4x5(int8_t *rs1, ptrdiff_t rs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_i8mf2x5(int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8mf2x5(int8_t *rs1, ptrdiff_t rs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_i8m1x5(int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8m1x5(int8_t *rs1, ptrdiff_t rs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8mf8x5(uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8mf8x5(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8mf4x5(uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8mf4x5(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8mf2x5(uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8mf2x5(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8m1x5(uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) { - 
return __riscv_vssseg5e8(base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8m1x5(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_i8m1x5_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg5e8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vssseg5e8(mask, base, bstride, v_tuple, vl); +void test_vssseg5e8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e8(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg5e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg6e16.c b/auto-generated/gnu-overloaded-tests/vssseg6e16.c index 658f75ba1..1c84ee895 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg6e16.c +++ b/auto-generated/gnu-overloaded-tests/vssseg6e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg6e16_v_f16mf4x6(float16_t *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, 
bstride, v_tuple, vl); +void test_vssseg6e16_v_f16mf4x6(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_f16mf2x6(float16_t *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_f16mf2x6(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_f16m1x6(float16_t *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_f16m1x6(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_i16mf4x6(int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_i16mf4x6(int16_t *rs1, ptrdiff_t rs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_i16mf2x6(int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_i16mf2x6(int16_t *rs1, ptrdiff_t rs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_i16m1x6(int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_i16m1x6(int16_t *rs1, ptrdiff_t rs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_u16mf4x6(uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_u16mf4x6(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_u16mf2x6(uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_u16mf2x6(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_u16m1x6(uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(base, bstride, v_tuple, vl); +void test_vssseg6e16_v_u16m1x6(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_f16m1x6_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void 
test_vssseg6e16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_i16m1x6_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e16(mask, base, bstride, v_tuple, vl); +void test_vssseg6e16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg6e32.c b/auto-generated/gnu-overloaded-tests/vssseg6e32.c index 71eeed7f2..ece85a32c 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg6e32.c +++ b/auto-generated/gnu-overloaded-tests/vssseg6e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg6e32_v_f32mf2x6(float32_t *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v_tuple, vl); +void test_vssseg6e32_v_f32mf2x6(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_f32m1x6(float32_t *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v_tuple, vl); +void test_vssseg6e32_v_f32m1x6(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_i32mf2x6(int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v_tuple, vl); +void 
test_vssseg6e32_v_i32mf2x6(int32_t *rs1, ptrdiff_t rs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_i32m1x6(int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v_tuple, vl); +void test_vssseg6e32_v_i32m1x6(int32_t *rs1, ptrdiff_t rs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_u32mf2x6(uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v_tuple, vl); +void test_vssseg6e32_v_u32mf2x6(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_u32m1x6(uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(base, bstride, v_tuple, vl); +void test_vssseg6e32_v_u32m1x6(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_f32mf2x6_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); +void test_vssseg6e32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_f32m1x6_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); +void test_vssseg6e32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); +void test_vssseg6e32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_i32m1x6_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); +void test_vssseg6e32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); +void test_vssseg6e32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e32(mask, base, bstride, v_tuple, vl); +void test_vssseg6e32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg6e64.c b/auto-generated/gnu-overloaded-tests/vssseg6e64.c index d9268fe28..e79e46c5b 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg6e64.c +++ 
b/auto-generated/gnu-overloaded-tests/vssseg6e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg6e64_v_f64m1x6(float64_t *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e64(base, bstride, v_tuple, vl); +void test_vssseg6e64_v_f64m1x6(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e64(rs1, rs2, vs3, vl); } -void test_vssseg6e64_v_i64m1x6(int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e64(base, bstride, v_tuple, vl); +void test_vssseg6e64_v_i64m1x6(int64_t *rs1, ptrdiff_t rs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e64(rs1, rs2, vs3, vl); } -void test_vssseg6e64_v_u64m1x6(uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e64(base, bstride, v_tuple, vl); +void test_vssseg6e64_v_u64m1x6(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e64(rs1, rs2, vs3, vl); } -void test_vssseg6e64_v_f64m1x6_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl); +void test_vssseg6e64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e64_v_i64m1x6_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl); +void test_vssseg6e64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e64(mask, base, bstride, v_tuple, vl); +void test_vssseg6e64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg6e8.c b/auto-generated/gnu-overloaded-tests/vssseg6e8.c index 0dc9ae9f3..fa8aa7bc0 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg6e8.c +++ b/auto-generated/gnu-overloaded-tests/vssseg6e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg6e8_v_i8mf8x6(int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8mf8x6(int8_t *rs1, ptrdiff_t rs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_i8mf4x6(int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8mf4x6(int8_t *rs1, ptrdiff_t rs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_i8mf2x6(int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8mf2x6(int8_t *rs1, ptrdiff_t rs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_i8m1x6(int8_t *base, ptrdiff_t bstride, 
vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8m1x6(int8_t *rs1, ptrdiff_t rs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8mf8x6(uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8mf8x6(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8mf4x6(uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8mf4x6(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8mf2x6(uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8mf2x6(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8m1x6(uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8m1x6(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_i8m1x6_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, 
size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg6e8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vssseg6e8(mask, base, bstride, v_tuple, vl); +void test_vssseg6e8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e8(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg6e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg7e16.c b/auto-generated/gnu-overloaded-tests/vssseg7e16.c index 66b80aabc..0e3ff43c8 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg7e16.c +++ b/auto-generated/gnu-overloaded-tests/vssseg7e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg7e16_v_f16mf4x7(float16_t *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_f16mf4x7(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_f16mf2x7(float16_t *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_f16mf2x7(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_f16m1x7(float16_t *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_f16m1x7(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16mf4x7(int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16mf4x7(int16_t *rs1, ptrdiff_t rs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16mf2x7(int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16mf2x7(int16_t *rs1, ptrdiff_t rs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16m1x7(int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16m1x7(int16_t *rs1, ptrdiff_t rs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16mf4x7(uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16mf4x7(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16mf2x7(uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16mf2x7(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void 
test_vssseg7e16_v_u16m1x7(uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16m1x7(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_f16m1x7_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_i16m1x7_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e16(mask, base, bstride, v_tuple, vl); +void test_vssseg7e16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg7e32.c b/auto-generated/gnu-overloaded-tests/vssseg7e32.c index 09db2634b..17c82a6aa 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg7e32.c +++ b/auto-generated/gnu-overloaded-tests/vssseg7e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg7e32_v_f32mf2x7(float32_t *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32mf2x7(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_f32m1x7(float32_t *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32m1x7(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32mf2x7(int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_i32mf2x7(int32_t *rs1, ptrdiff_t rs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32m1x7(int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_i32m1x7(int32_t *rs1, ptrdiff_t rs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32mf2x7(uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32mf2x7(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32m1x7(uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32m1x7(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_f32m1x7_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_i32m1x7_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); +void 
test_vssseg7e32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e32(mask, base, bstride, v_tuple, vl); +void test_vssseg7e32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg7e64.c b/auto-generated/gnu-overloaded-tests/vssseg7e64.c index 50eae0073..a423ae0d0 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg7e64.c +++ b/auto-generated/gnu-overloaded-tests/vssseg7e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg7e64_v_f64m1x7(float64_t *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64(base, bstride, v_tuple, vl); +void test_vssseg7e64_v_f64m1x7(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64(rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_i64m1x7(int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64(base, bstride, v_tuple, vl); +void test_vssseg7e64_v_i64m1x7(int64_t *rs1, ptrdiff_t rs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64(rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_u64m1x7(uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64(base, bstride, v_tuple, vl); +void test_vssseg7e64_v_u64m1x7(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64(rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_f64m1x7_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64(mask, base, bstride, v_tuple, vl); +void test_vssseg7e64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_i64m1x7_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64(mask, base, bstride, v_tuple, vl); +void test_vssseg7e64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e64(mask, base, bstride, v_tuple, vl); +void test_vssseg7e64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg7e8.c 
b/auto-generated/gnu-overloaded-tests/vssseg7e8.c index 602bfb51b..2e8b1e241 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg7e8.c +++ b/auto-generated/gnu-overloaded-tests/vssseg7e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg7e8_v_i8mf8x7(int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf8x7(int8_t *rs1, ptrdiff_t rs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf4x7(int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf4x7(int8_t *rs1, ptrdiff_t rs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf2x7(int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf2x7(int8_t *rs1, ptrdiff_t rs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8m1x7(int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8m1x7(int8_t *rs1, ptrdiff_t rs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf8x7(uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf8x7(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf4x7(uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf4x7(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf2x7(uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf2x7(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8m1x7(uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8m1x7(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8mf2x7_m(vbool16_t vm, 
int8_t *rs1, ptrdiff_t rs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_i8m1x7_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg7e8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vssseg7e8(mask, base, bstride, v_tuple, vl); +void test_vssseg7e8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e8(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg7e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg8e16.c b/auto-generated/gnu-overloaded-tests/vssseg8e16.c index 65f98b97e..e4448b11d 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg8e16.c +++ b/auto-generated/gnu-overloaded-tests/vssseg8e16.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e16_v_f16mf4x8(float16_t *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf4x8(float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16mf2x8(float16_t *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf2x8(float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16m1x8(float16_t *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16m1x8(float16_t *rs1, ptrdiff_t rs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16mf4x8(int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf4x8(int16_t *rs1, ptrdiff_t rs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } 
-void test_vssseg8e16_v_i16mf2x8(int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf2x8(int16_t *rs1, ptrdiff_t rs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16m1x8(int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16m1x8(int16_t *rs1, ptrdiff_t rs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf4x8(uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf4x8(uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf2x8(uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf2x8(uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16m1x8(uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16m1x8(uint16_t *rs1, ptrdiff_t rs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_f16m1x8_m(vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, ptrdiff_t rs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, ptrdiff_t rs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_i16m1x8_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, ptrdiff_t rs2, 
vint16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e16(mask, base, bstride, v_tuple, vl); +void test_vssseg8e16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, ptrdiff_t rs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e16\.[ivxfswum.]+\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg8e32.c b/auto-generated/gnu-overloaded-tests/vssseg8e32.c index c490c2ca0..72dea4bb6 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg8e32.c +++ b/auto-generated/gnu-overloaded-tests/vssseg8e32.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e32_v_f32mf2x8(float32_t *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32mf2x8(float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_f32m1x8(float32_t *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32m1x8(float32_t *rs1, ptrdiff_t rs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32mf2x8(int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32mf2x8(int32_t *rs1, ptrdiff_t rs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32m1x8(int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32m1x8(int32_t *rs1, ptrdiff_t rs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32mf2x8(uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32mf2x8(uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32m1x8(uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32m1x8(uint32_t *rs1, ptrdiff_t rs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, 
ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_f32m1x8_m(vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, ptrdiff_t rs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, ptrdiff_t rs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_i32m1x8_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, ptrdiff_t rs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e32(mask, base, bstride, v_tuple, vl); +void test_vssseg8e32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, ptrdiff_t rs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e32(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e32\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg8e64.c b/auto-generated/gnu-overloaded-tests/vssseg8e64.c index e238bb88d..8ec6a4b6f 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg8e64.c +++ b/auto-generated/gnu-overloaded-tests/vssseg8e64.c @@ -6,28 +6,28 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e64_v_f64m1x8(float64_t *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64(base, bstride, v_tuple, vl); +void test_vssseg8e64_v_f64m1x8(float64_t *rs1, ptrdiff_t rs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64(rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_i64m1x8(int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64(base, bstride, v_tuple, vl); +void test_vssseg8e64_v_i64m1x8(int64_t *rs1, ptrdiff_t rs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64(rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_u64m1x8(uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64(base, bstride, v_tuple, vl); +void test_vssseg8e64_v_u64m1x8(uint64_t *rs1, ptrdiff_t rs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64(rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_f64m1x8_m(vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x8_t 
v_tuple, size_t vl) { - return __riscv_vssseg8e64(mask, base, bstride, v_tuple, vl); +void test_vssseg8e64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, ptrdiff_t rs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_i64m1x8_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64(mask, base, bstride, v_tuple, vl); +void test_vssseg8e64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, ptrdiff_t rs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e64(mask, base, bstride, v_tuple, vl); +void test_vssseg8e64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, ptrdiff_t rs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e64\.[ivxfswum.]+\s+} 6 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssseg8e8.c b/auto-generated/gnu-overloaded-tests/vssseg8e8.c index 1e85bc8ca..0ca240c9f 100644 --- a/auto-generated/gnu-overloaded-tests/vssseg8e8.c +++ b/auto-generated/gnu-overloaded-tests/vssseg8e8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vssseg8e8_v_i8mf8x8(int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf8x8(int8_t *rs1, ptrdiff_t rs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf4x8(int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf4x8(int8_t *rs1, ptrdiff_t rs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf2x8(int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf2x8(int8_t *rs1, ptrdiff_t rs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8m1x8(int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8m1x8(int8_t *rs1, ptrdiff_t rs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf8x8(uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf8x8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf4x8(uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf4x8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf2x8(uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf2x8(uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8m1x8(uint8_t 
*base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8m1x8(uint8_t *rs1, ptrdiff_t rs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, ptrdiff_t rs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_i8m1x8_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, ptrdiff_t rs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } -void test_vssseg8e8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vssseg8e8(mask, base, bstride, v_tuple, vl); +void test_vssseg8e8_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, ptrdiff_t rs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e8(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssseg8e8\.[ivxfswum.]+\s+} 16 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssub.c b/auto-generated/gnu-overloaded-tests/vssub.c index 10befe3f4..15199371d 100644 --- a/auto-generated/gnu-overloaded-tests/vssub.c +++ b/auto-generated/gnu-overloaded-tests/vssub.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return 
__riscv_vssub(op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint16mf2_t 
test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, 
op2, vl); +vint32m2_t test_vssub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub(vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub(vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); 
+vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t 
rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_m(vbool32_t vm, vint32m1_t 
vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t 
rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub(vm, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub(mask, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssub\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vssubu.c b/auto-generated/gnu-overloaded-tests/vssubu.c index 7c776985c..2891736ad 100644 --- a/auto-generated/gnu-overloaded-tests/vssubu.c +++ b/auto-generated/gnu-overloaded-tests/vssubu.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu(op1, 
op2, vl); +vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) 
{ - return __riscv_vssubu(op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu(vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu(op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu(vs2, rs1, vl); } -vuint64m4_t 
-  return __riscv_vssubu(op1, op2, vl);
+vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssubu(vs2, vs1, vl);
 }

-vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu(op1, op2, vl);
+vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu(vs2, rs1, vl);
 }

-vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vssubu(op1, op2, vl);
+vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssubu(vs2, vs1, vl);
 }

-vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu(op1, op2, vl);
+vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu(vs2, rs1, vl);
 }

-vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu(mask, op1, op2, vl);
+vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssubu\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsub.c b/auto-generated/gnu-overloaded-tests/vsub.c
index d7f2b77f7..e3fc1c7c7 100644
--- a/auto-generated/gnu-overloaded-tests/vsub.c
+++ b/auto-generated/gnu-overloaded-tests/vsub.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m1_t test_vsub_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m1_t test_vsub_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m2_t test_vsub_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m2_t test_vsub_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m4_t test_vsub_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m4_t test_vsub_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m8_t test_vsub_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint8m8_t test_vsub_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m1_t test_vsub_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m1_t test_vsub_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m2_t test_vsub_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m2_t test_vsub_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m4_t test_vsub_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m4_t test_vsub_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m8_t test_vsub_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint16m8_t test_vsub_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m1_t test_vsub_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m1_t test_vsub_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m2_t test_vsub_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m2_t test_vsub_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m4_t test_vsub_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m4_t test_vsub_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m8_t test_vsub_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint32m8_t test_vsub_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m1_t test_vsub_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m1_t test_vsub_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m2_t test_vsub_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m2_t test_vsub_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m4_t test_vsub_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m4_t test_vsub_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m8_t test_vsub_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vint64m8_t test_vsub_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }
-vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsub(vs2, vs1, vl);
 }

-vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(op1, op2, vl);
+vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vs2, rs1, vl);
 }

-vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m1_t test_vsub_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m1_t test_vsub_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m2_t test_vsub_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m2_t test_vsub_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m4_t test_vsub_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m4_t test_vsub_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m8_t test_vsub_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint8m8_t test_vsub_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m1_t test_vsub_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m1_t test_vsub_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m2_t test_vsub_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m2_t test_vsub_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m4_t test_vsub_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m4_t test_vsub_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m8_t test_vsub_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint16m8_t test_vsub_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m1_t test_vsub_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m1_t test_vsub_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m2_t test_vsub_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m2_t test_vsub_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m4_t test_vsub_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m4_t test_vsub_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m8_t test_vsub_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint32m8_t test_vsub_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m1_t test_vsub_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m1_t test_vsub_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m2_t test_vsub_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m2_t test_vsub_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m4_t test_vsub_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m4_t test_vsub_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m8_t test_vsub_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vint64m8_t test_vsub_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

-vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, vs1, vl);
 }

-vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub(mask, op1, op2, vl);
+vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub(vm, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vsub\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxei16.c b/auto-generated/gnu-overloaded-tests/vsuxei16.c
index cab658e56..0d32d7d25 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxei16.c
@@ -6,460 +6,460 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxei16_v_f16mf4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f16mf4(float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei16(rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_f16mf2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f16mf2(float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei16(rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_f16m1(float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f16m1(float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsuxei16(rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_f16m2(float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f16m2(float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsuxei16(rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_f16m4(float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f16m4(float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsuxei16(rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_f16m8(float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f16m8(float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsuxei16(rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_f32mf2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f32mf2(float32_t *rs1, vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei16(rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_f32m1(float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsuxei16(base, bindex, value, vl);
+void test_vsuxei16_v_f32m1(float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) {
vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m2(float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_f32m2(float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m4(float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_f32m4(float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m8(float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_f32m8(float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m1(float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_f64m1(float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m2(float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_f64m2(float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m4(float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_f64m4(float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m8(float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_f64m8(float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i8mf8(int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i8mf4(int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i8mf2(int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i8m1(int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i8m2(int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m4(int8_t *base, 
vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i8m4(int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i16mf4(int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i16mf2(int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i16m1(int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i16m2(int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i16m4(int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i16m8(int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i32mf2(int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i32m1(int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i32m2(int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i32m4(int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i32m8(int32_t *rs1, vuint16m4_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i64m1(int64_t *rs1, 
vuint16mf4_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i64m2(int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i64m4(int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_i64m8(int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u8mf8(uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u8mf4(uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u8mf2(uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u8m1(uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u8m2(uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u8m4(uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u16mf4(uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u16mf2(uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u16m1(uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m2(uint16_t *base, 
vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u16m2(uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u16m4(uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u16m8(uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u32mf2(uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u32m1(uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u32m2(uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u32m4(uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u32m8(uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u64m1(uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u64m2(uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u64m4(uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei16(base, bindex, value, vl); +void test_vsuxei16_v_u64m8(uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei16(mask, 
base, bindex, value, vl); +void test_vsuxei16_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m1_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint16m1_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m4_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint16m4_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f16m8_m(vbool2_t mask, float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint16m8_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m1_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m4_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint16m2_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f32m8_m(vbool4_t mask, float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint16m4_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void 
test_vsuxei16_v_f64m1_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m4_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint16m1_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_f64m8_m(vbool8_t mask, float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint16m2_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint16m2_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint16m8_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t rs2, vint16mf4_t vs3, size_t vl) { + return 
__riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint16m1_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint16m4_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint16m8_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint16m2_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint16m4_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t rs2, 
vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint16m1_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint16m2_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void 
test_vsuxei16_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint16m8_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint16m4_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei16(mask, base, bindex, value, vl); +void test_vsuxei16_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); } -void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t 
value, size_t vl) {
-  return __riscv_vsuxei16(mask, base, bindex, value, vl);
+void test_vsuxei16_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsuxei16(mask, base, bindex, value, vl);
+void test_vsuxei16_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsuxei16(mask, base, bindex, value, vl);
+void test_vsuxei16_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint16m2_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei16\.[ivxfswum.]+\s+} 114 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxei32.c b/auto-generated/gnu-overloaded-tests/vsuxei32.c
index 9ce609819..5bbff39a8 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxei32_v_f16mf4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
-  return __riscv_vsuxei32(base, bindex, value, vl);
+void test_vsuxei32_v_f16mf4(float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei32(rs1, rs2, vs3, vl);
 }

-void test_vsuxei32_v_f16mf2(float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
-  return __riscv_vsuxei32(base, bindex, value, vl);
+void test_vsuxei32_v_f16mf2(float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei32(rs1, rs2, vs3, vl);
 }

-void test_vsuxei32_v_f16m1(float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
-  return __riscv_vsuxei32(base, bindex, value, vl);
+void test_vsuxei32_v_f16m1(float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsuxei32(rs1, rs2, vs3, vl);
 }

-void test_vsuxei32_v_f16m2(float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
-  return __riscv_vsuxei32(base, bindex, value, vl);
+void test_vsuxei32_v_f16m2(float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsuxei32(rs1, rs2, vs3, vl);
 }

-void test_vsuxei32_v_f16m4(float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
-  return __riscv_vsuxei32(base, bindex, value, vl);
+void test_vsuxei32_v_f16m4(float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsuxei32(rs1, rs2, vs3, vl);
 }

-void test_vsuxei32_v_f32mf2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
-  return __riscv_vsuxei32(base, bindex, value, vl);
+void test_vsuxei32_v_f32mf2(float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei32(rs1, rs2, vs3, vl);
 }

-void test_vsuxei32_v_f32m1(float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
-  return __riscv_vsuxei32(base, bindex, value, vl);
+void test_vsuxei32_v_f32m1(float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) {
+  return __riscv_vsuxei32(rs1, rs2, vs3, vl);
 }

-void test_vsuxei32_v_f32m2(float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
-  return
__riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_f32m2(float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m4(float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_f32m4(float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m8(float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_f32m8(float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m1(float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_f64m1(float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m2(float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_f64m2(float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m4(float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_f64m4(float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m8(float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_f64m8(float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i8mf8(int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i8mf4(int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i8mf2(int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i8m1(int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i8m2(int8_t *rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i16mf4(int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t 
vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i16mf2(int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i16m1(int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i16m2(int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i16m4(int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i32mf2(int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i32m1(int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i32m2(int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i32m4(int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i32m8(int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i64m1(int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i64m2(int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i64m4(int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { - return 
__riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_i64m8(int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u8mf8(uint8_t *rs1, vuint32mf2_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u8mf4(uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u8mf2(uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u8m1(uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u8m2(uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u16mf4(uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u16mf2(uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u16m1(uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u16m2(uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u16m4(uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u32mf2(uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u32m1(uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, 
size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u32m2(uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u32m4(uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u32m8(uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u64m1(uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u64m2(uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u64m4(uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei32(base, bindex, value, vl); +void test_vsuxei32_v_u64m8(uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei32(rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m1_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint32m2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f16m4_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return 
__riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint32m8_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m1_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint32m1_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m4_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint32m4_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f32m8_m(vbool4_t mask, float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint32m8_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m1_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint32m1_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m4_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint32m2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_f64m8_m(vbool8_t mask, float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint32m4_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei32(mask, base, bindex, value, vl); +void test_vsuxei32_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); } -void 
test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t rs2, vint8mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t rs2, vint8mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint32m4_t rs2, vint8m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t rs2, vint8m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t rs2, vint16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t rs2, vint16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint32m2_t rs2, vint16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint32m4_t rs2, vint16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint32m8_t rs2, vint16m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t rs2, vint32mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint32m1_t rs2, vint32m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t rs2, vint32m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint32m4_t rs2, vint32m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint32m8_t rs2, vint32m8_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t rs2, vint64m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t rs2, vint64m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint32m2_t rs2, vint64m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint32m4_t rs2, vint64m8_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t rs2, vuint8mf8_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t rs2, vuint8mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t rs2, vuint8mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t rs2, vuint8m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t rs2, vuint8m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t rs2, vuint16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t rs2, vuint16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t rs2, vuint16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t rs2, vuint16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t rs2, vuint16m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t rs2, vuint32mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t rs2, vuint32m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t rs2, vuint32m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t rs2, vuint32m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint32m8_t rs2, vuint32m8_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t rs2, vuint64m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t rs2, vuint64m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t rs2, vuint64m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
-void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
- return __riscv_vsuxei32(mask, base, bindex, value, vl);
+void test_vsuxei32_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint32m4_t rs2, vuint64m8_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei32\.[ivxfswum.]+\s+} 104 } } */
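Every hunk in these auto-generated tests applies the same mechanical rename: mask -> vm, base -> rs1, bindex -> rs2, and value -> vs3, aligning the C parameter names with the vm/rs1/rs2/vs3 operand names of the underlying vsuxei*.v instructions. A minimal usage sketch of the renamed overloaded intrinsic follows; the caller name scatter_i32 is illustrative and not part of the generated tests, and the treatment of rs2 as byte offsets is an assumption based on the V specification's indexed-store semantics:

#include <riscv_vector.h>

/* Hypothetical caller: for each element i < vl whose mask bit in vm is
   set, store vs3[i] to the address rs1 + rs2[i] (rs2 holding byte
   offsets), via the same masked overloaded intrinsic exercised by
   test_vsuxei32_v_i32m1_m above. */
void scatter_i32(vbool32_t vm, int32_t *rs1, vuint32m1_t rs2,
                 vint32m1_t vs3, size_t vl) {
  __riscv_vsuxei32(vm, rs1, rs2, vs3, vl); /* unordered indexed store */
}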
diff --git a/auto-generated/gnu-overloaded-tests/vsuxei64.c b/auto-generated/gnu-overloaded-tests/vsuxei64.c
index 0d9631ee4..ae7abaca9 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxei64.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxei64.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxei64_v_f16mf4(float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f16mf4(float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f16mf2(float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f16mf2(float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f16m1(float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f16m1(float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f16m2(float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f16m2(float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f32mf2(float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f32mf2(float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f32m1(float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f32m1(float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f32m2(float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f32m2(float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f32m4(float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f32m4(float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f64m1(float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f64m1(float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f64m2(float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f64m2(float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f64m4(float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f64m4(float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_f64m8(float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_f64m8(float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_i8mf8(int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_i8mf4(int8_t *rs1, vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_i8mf2(int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_i8m1(int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_i16mf4(int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
 }
-void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
- return __riscv_vsuxei64(base, bindex, value, vl);
+void test_vsuxei64_v_i16mf2(int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3,
size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i16m1(int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i16m2(int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i32mf2(int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i32m1(int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i32m2(int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i32m4(int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i64m1(int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i64m2(int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i64m4(int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_i64m8(int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u8mf8(uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u8mf4(uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - 
return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u8mf2(uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u8m1(uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u16mf4(uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u16mf2(uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u16m1(uint16_t *rs1, vuint64m4_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u16m2(uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u32mf2(uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u32m1(uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u32m2(uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u32m4(uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u64m1(uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u64m2(uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u64m4(uint64_t *rs1, vuint64m4_t rs2, 
vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei64(base, bindex, value, vl); +void test_vsuxei64_v_u64m8(uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei64(rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16mf4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16m1_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint64m4_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f16m2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m1_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint64m2_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f32m4_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint64m8_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m1_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint64m1_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m2_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); 
+void test_vsuxei64_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m4_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint64m4_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_f64m8_m(vbool8_t mask, float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint64m8_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint64m8_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint64m2_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint64m4_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { - return 
__riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint64m2_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint64m8_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint64m1_t rs2, vint64m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint64m4_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint64m8_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, 
vuint8m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } 
-void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } -void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei64(mask, base, bindex, value, vl); +void test_vsuxei64_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint64m8_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei64\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxei8.c b/auto-generated/gnu-overloaded-tests/vsuxei8.c index b08d4bfa0..82a2682a8 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxei8.c +++ b/auto-generated/gnu-overloaded-tests/vsuxei8.c @@ -6,476 +6,476 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxei8_v_f16mf4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f16mf4(float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16mf2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f16mf2(float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m1(float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f16m1(float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m2(float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f16m2(float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m4(float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f16m4(float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m8(float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f16m8(float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32mf2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f32mf2(float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m1(float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f32m1(float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m2(float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - 
return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f32m2(float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m4(float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f32m4(float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m8(float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f32m8(float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m1(float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f64m1(float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m2(float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f64m2(float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m4(float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f64m4(float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f64m8(float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_f64m8(float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i8mf8(int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i8mf4(int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i8mf2(int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i8m1(int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i8m2(int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i8m4(int8_t *rs1, vuint8m4_t rs2, vint8m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void 
test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i8m8(int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i16mf4(int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i16mf2(int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i16m1(int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i16m2(int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i16m4(int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i16m8(int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i32mf2(int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i32m1(int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i32m2(int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i32m4(int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i32m8(int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i64m1(int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) { 
+ return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i64m2(int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i64m4(int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_i64m8(int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u8mf8(uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u8mf4(uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u8mf2(uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u8m1(uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u8m2(uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u8m4(uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u8m8(uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u16mf4(uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u16mf2(uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void 
test_vsuxei8_v_u16m1(uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u16m2(uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u16m4(uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u16m8(uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u32mf2(uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u32m1(uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u32m2(uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u32m4(uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u32m8(uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u64m1(uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u64m2(uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u64m4(uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { - return __riscv_vsuxei8(base, bindex, value, vl); +void test_vsuxei8_v_u64m8(uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) { + return __riscv_vsuxei8(rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16mf4_m(vbool64_t mask, 
float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16mf4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t rs2, vfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16mf2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16mf2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t rs2, vfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m1_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m1_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t rs2, vfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t rs2, vfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m4_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m4_m(vbool4_t vm, float16_t *rs1, vuint8m2_t rs2, vfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f16m8_m(vbool2_t mask, float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m8_m(vbool2_t vm, float16_t *rs1, vuint8m4_t rs2, vfloat16m8_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32mf2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t rs2, vfloat32mf2_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m1_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m1_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t rs2, vfloat32m1_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t rs2, vfloat32m2_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m4_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m4_m(vbool8_t vm, float32_t *rs1, vuint8m1_t rs2, vfloat32m4_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl); } -void test_vsuxei8_v_f32m8_m(vbool4_t mask, float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { - return __riscv_vsuxei8(mask, base, bindex, value, vl); +void test_vsuxei8_v_f32m8_m(vbool4_t vm, float32_t *rs1, vuint8m2_t rs2, vfloat32m8_t vs3, size_t vl) { + return __riscv_vsuxei8(vm, 
rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_f64m1_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_f64m1_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t rs2, vfloat64m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_f64m2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_f64m2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t rs2, vfloat64m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_f64m4_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_f64m4_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t rs2, vfloat64m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_f64m8_m(vbool8_t mask, float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_f64m8_m(vbool8_t vm, float64_t *rs1, vuint8m1_t rs2, vfloat64m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i8mf8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t rs2, vint8mf8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i8mf4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t rs2, vint8mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i8mf2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t rs2, vint8mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i8m1_m(vbool8_t vm, int8_t *rs1, vuint8m1_t rs2, vint8m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i8m2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t rs2, vint8m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i8m4_m(vbool2_t vm, int8_t *rs1, vuint8m4_t rs2, vint8m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i8m8_m(vbool1_t vm, int8_t *rs1, vuint8m8_t rs2, vint8m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i16mf4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t rs2, vint16mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i16mf2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t rs2, vint16mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i16m1_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t rs2, vint16m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i16m2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t rs2, vint16m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i16m4_m(vbool4_t vm, int16_t *rs1, vuint8m2_t rs2, vint16m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i16m8_m(vbool2_t vm, int16_t *rs1, vuint8m4_t rs2, vint16m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i32mf2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t rs2, vint32mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i32m1_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t rs2, vint32m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i32m2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t rs2, vint32m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i32m4_m(vbool8_t vm, int32_t *rs1, vuint8m1_t rs2, vint32m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i32m8_m(vbool4_t vm, int32_t *rs1, vuint8m2_t rs2, vint32m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i64m1_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t rs2, vint64m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i64m2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t rs2, vint64m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i64m4_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t rs2, vint64m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_i64m8_m(vbool8_t vm, int64_t *rs1, vuint8m1_t rs2, vint64m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u8mf8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t rs2, vuint8mf8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u8mf4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t rs2, vuint8mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u8mf2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t rs2, vuint8mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u8m1_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t rs2, vuint8m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u8m2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t rs2, vuint8m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u8m4_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t rs2, vuint8m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u8m8_m(vbool1_t vm, uint8_t *rs1, vuint8m8_t rs2, vuint8m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u16mf4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t rs2, vuint16mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u16mf2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t rs2, vuint16mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u16m1_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t rs2, vuint16m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u16m2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t rs2, vuint16m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u16m4_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t rs2, vuint16m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u16m8_m(vbool2_t vm, uint16_t *rs1, vuint8m4_t rs2, vuint16m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u32mf2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t rs2, vuint32mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u32m1_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t rs2, vuint32m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u32m2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t rs2, vuint32m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u32m4_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t rs2, vuint32m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u32m8_m(vbool4_t vm, uint32_t *rs1, vuint8m2_t rs2, vuint32m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u64m1_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t rs2, vuint64m1_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u64m2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t rs2, vuint64m2_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u64m4_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t rs2, vuint64m4_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

-void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return __riscv_vsuxei8(mask, base, bindex, value, vl);
+void test_vsuxei8_v_u64m8_m(vbool8_t vm, uint64_t *rs1, vuint8m1_t rs2, vuint64m8_t vs3, size_t vl) {
+  return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxei8\.[ivxfswum.]+\s+} 118 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg2ei16.c b/auto-generated/gnu-overloaded-tests/vsuxseg2ei16.c
index be86bb5ab..71346ad43 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg2ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg2ei16.c
@@ -6,388 +6,388 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg2ei16_v_f16mf4x2(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16mf4x2(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16mf2x2(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16mf2x2(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16m1x2(float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16m1x2(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16m2x2(float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16m2x2(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16m4x2(float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16m4x2(float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32mf2x2(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32mf2x2(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32m1x2(float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32m1x2(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32m2x2(float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32m2x2(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32m4x2(float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32m4x2(float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f64m1x2(float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f64m1x2(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f64m2x2(float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f64m2x2(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f64m4x2(float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f64m4x2(float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8mf8x2(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8mf4x2(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8mf2x2(int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8m1x2(int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8m2x2(int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8m4x2(int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16mf4x2(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16mf2x2(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16m1x2(int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16m2x2(int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16m4x2(int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32mf2x2(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32m1x2(int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32m2x2(int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32m4x2(int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i64m1x2(int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i64m2x2(int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i64m4x2(int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8m1x2(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8m2x2(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8m4x2(uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16m1x2(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16m2x2(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16m4x2(uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32m1x2(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32m2x2(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32m4x2(uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u64m1x2(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u64m2x2(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u64m4x2(uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint16m4_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint16m2_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint16m1_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint16m8_t vs2, vint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint16m4_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint16m2_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint16m1_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint16m8_t vs2, vuint8m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint16m4_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint16m2_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint16m1_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei16\.[ivxfswum.]+\s+} 96 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg2ei32.c b/auto-generated/gnu-overloaded-tests/vsuxseg2ei32.c
index 764e22635..c985b3d5b 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg2ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg2ei32.c
@@ -6,372 +6,372 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg2ei32_v_f16mf4x2(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16mf4x2(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16mf2x2(float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16mf2x2(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16m1x2(float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16m1x2(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16m2x2(float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16m2x2(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16m4x2(float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16m4x2(float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32mf2x2(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32mf2x2(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32m1x2(float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32m1x2(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32m2x2(float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32m2x2(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32m4x2(float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32m4x2(float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f64m1x2(float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f64m1x2(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f64m2x2(float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f64m2x2(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f64m4x2(float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f64m4x2(float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8mf8x2(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8mf4x2(int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8mf2x2(int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8m1x2(int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8m2x2(int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16mf4x2(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16mf2x2(int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16m1x2(int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16m2x2(int16_t *rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16m4x2(int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32mf2x2(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32m1x2(int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32m2x2(int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32m4x2(int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i64m1x2(int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i64m2x2(int64_t *rs1, vuint32m1_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i64m4x2(int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8m1x2(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8m2x2(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u16m1x2(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u16m2x2(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u16m4x2(uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u32m1x2(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u32m2x2(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u32m4x2(uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u64m1x2(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u64m2x2(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u64m4x2(uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint32m8_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint32m4_t vs2, vfloat32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint32m2_t vs2, vfloat64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint32m8_t vs2, vint16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint32m4_t vs2, vint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint32m2_t vs2, vint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint32m8_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, 
bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint32m4_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint32m2_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei32\.[ivxfswum.]+\s+} 92 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg2ei64.c b/auto-generated/gnu-overloaded-tests/vsuxseg2ei64.c
index 1e7a1f446..0d717151c 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg2ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg2ei64.c
@@ -6,332 +6,332 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg2ei64_v_f16mf4x2(float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16mf4x2(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei64_v_f16mf2x2(float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16mf2x2(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei64_v_f16m1x2(float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16m1x2(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei64_v_f16m2x2(float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_f16m2x2(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei64_v_f32mf2x2(float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return
__riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32mf2x2(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m1x2(float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m1x2(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m2x2(float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m2x2(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m4x2(float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m4x2(float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m1x2(float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m1x2(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m2x2(float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m2x2(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m4x2(float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m4x2(float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf8x2(int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf4x2(int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf2x2(int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8m1x2(int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16mf4x2(int16_t *rs1, 
vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16mf2x2(int16_t *rs1, vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16m1x2(int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16m2x2(int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32mf2x2(int32_t *rs1, vuint64m1_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m1x2(int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m2x2(int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m4x2(int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m1x2(int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m2x2(int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m4x2(int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8m1x2(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16m1x2(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16m2x2(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m1x2(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m2x2(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m4x2(uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) 
{ - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u64m1x2(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u64m2x2(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u64m4x2(uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, 
bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint64m8_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint64m4_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, 
base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint64m8_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint64m4_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, 
bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint64m8_t vs2, vuint32m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint64m4_t vs2, vuint64m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei64\.[ivxfswum.]+\s+} 82 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg2ei8.c b/auto-generated/gnu-overloaded-tests/vsuxseg2ei8.c
index c8f8ce204..de8f9ee6e 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg2ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg2ei8.c
@@ -6,388 +6,388 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg2ei8_v_f16mf4x2(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16mf4x2(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei8_v_f16mf2x2(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16mf2x2(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei8_v_f16m1x2(float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m1x2(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei8_v_f16m2x2(float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m2x2(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei8_v_f16m4x2(float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg2ei8_v_f16m4x2(float16_t *rs1, vuint8m2_t vs2, vfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg2ei8_v_f32mf2x2(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei8(base, bindex, v_tuple,
vl); +void test_vsuxseg2ei8_v_f32mf2x2(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f32m1x2(float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f32m1x2(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f32m2x2(float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f32m2x2(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f32m4x2(float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f32m4x2(float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f64m1x2(float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f64m1x2(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f64m2x2(float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f64m2x2(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f64m4x2(float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f64m4x2(float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8mf8x2(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8mf4x2(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8mf2x2(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8m1x2(int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8m2x2(int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8m4x2(int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16mf4x2(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16mf2x2(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16m1x2(int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16m2x2(int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16m4x2(int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32mf2x2(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32m1x2(int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32m2x2(int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32m4x2(int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i64m1x2(int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void 
test_vsuxseg2ei8_v_i64m2x2(int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i64m4x2(int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8m1x2(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8m2x2(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8m4x2(uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16m1x2(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16m2x2(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void 
test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16m4x2(uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32m1x2(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32m2x2(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32m4x2(uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u64m1x2(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u64m2x2(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u64m4x2(uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); 
} -void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t vm, float16_t *rs1, vuint8m2_t vs2, vfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t vm, float32_t *rs1, vuint8m1_t vs2, vfloat32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t vm, float64_t *rs1, vuint8mf2_t vs2, vfloat64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, 
vs3, vl); } -void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t vm, int8_t *rs1, vuint8m4_t vs2, vint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t vm, int16_t *rs1, vuint8m2_t vs2, vint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, 
vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t vm, int32_t *rs1, vuint8m1_t vs2, vint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t vm, int64_t *rs1, vuint8mf2_t vs2, vint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) 
{ - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t vm, uint8_t *rs1, vuint8m4_t vs2, vuint8m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t vm, uint16_t *rs1, vuint8m2_t vs2, vuint16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t vm, uint32_t *rs1, vuint8m1_t vs2, vuint32m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t vm, uint64_t *rs1, vuint8mf2_t vs2, vuint64m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg2ei8\.[ivxfswum.]+\s+} 96 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg3ei16.c b/auto-generated/gnu-overloaded-tests/vsuxseg3ei16.c index 94acec61e..7e2707593 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg3ei16.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg3ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg3ei16_v_f16mf4x3(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16mf4x3(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16mf2x3(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16mf2x3(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16m1x3(float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16m1x3(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16m2x3(float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16m2x3(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32mf2x3(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - 
return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32mf2x3(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32m1x3(float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32m1x3(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32m2x3(float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32m2x3(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f64m1x3(float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f64m1x3(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f64m2x3(float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f64m2x3(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf8x3(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf4x3(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf2x3(int8_t *rs1, vuint16m1_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8m1x3(int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8m2x3(int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16mf4x3(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16mf2x3(int16_t *rs1, 
vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16m1x3(int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16m2x3(int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32mf2x3(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32m1x3(int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32m2x3(int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i64m1x3(int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i64m2x3(int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8m1x3(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void 
test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8m2x3(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16m1x3(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16m2x3(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32m1x3(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u32m2x3(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u64m1x3(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u64m2x3(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x3_t vs3, size_t 
vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x3_t vs3, size_t vl) { + return 
__riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x3_t vs3, size_t 
vl) {
+  return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei16\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg3ei32.c b/auto-generated/gnu-overloaded-tests/vsuxseg3ei32.c
index b9a066c9c..a0f1d54f8 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg3ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg3ei32.c
@@ -6,300 +6,300 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg3ei32_v_f16mf4x3(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei32_v_f16mf4x3(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei32_v_f16mf2x3(float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei32_v_f16mf2x3(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg3ei32_v_f16m1x3(float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg3ei32_v_f16m1x3(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16m2x3(float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16m2x3(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32mf2x3(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32mf2x3(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32m1x3(float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m1x3(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32m2x3(float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m2x3(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m1x3(float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m1x3(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m2x3(float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m2x3(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf8x3(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf4x3(int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf2x3(int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m1x3(int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m2x3(int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + 
return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf4x3(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf2x3(int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m1x3(int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m2x3(int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32mf2x3(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m1x3(int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m2x3(int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m1x3(int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m2x3(int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, 
vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m1x3(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m2x3(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m1x3(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m2x3(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m1x3(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m2x3(uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u64m1x3(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); 
+void test_vsuxseg3ei32_v_u64m2x3(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - 
return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg3ei64.c b/auto-generated/gnu-overloaded-tests/vsuxseg3ei64.c index 0f1830c25..880ce3f33 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg3ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg3ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg3ei64_v_f16mf4x3(float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf4x3(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return 
__riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16mf2x3(float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf2x3(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16m1x3(float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m1x3(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16m2x3(float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m2x3(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32mf2x3(float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32mf2x3(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m1x3(float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32m1x3(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m2x3(float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32m2x3(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m1x3(float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m1x3(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m2x3(float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m2x3(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf8x3(int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf4x3(int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf2x3(int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void 
test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8m1x3(int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf4x3(int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf2x3(int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m1x3(int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m2x3(int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32mf2x3(int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32m1x3(int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32m2x3(int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m1x3(int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m2x3(int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, 
bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8m1x3(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16m1x3(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16m2x3(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m1x3(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m2x3(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m1x3(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m2x3(uint64_t *rs1, vuint64m2_t vs2, 
vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void 
test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void 
test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, 
vl); +void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei64\.[ivxfswum.]+\s+} 70 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg3ei8.c b/auto-generated/gnu-overloaded-tests/vsuxseg3ei8.c index eb90dc6bb..7e0de8e43 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg3ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg3ei8.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg3ei8_v_f16mf4x3(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf4x3(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16mf2x3(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf2x3(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m1x3(float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m1x3(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m2x3(float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m2x3(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32mf2x3(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32mf2x3(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m1x3(float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m1x3(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m2x3(float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m2x3(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m1x3(float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m1x3(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m2x3(float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m2x3(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf8x3(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf4x3(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf2x3(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m1x3(int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m2x3(int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf4x3(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) { + return 
__riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf2x3(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m1x3(int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m2x3(int16_t *rs1, vuint8m1_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32mf2x3(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m1x3(int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m2x3(int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m1x3(int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m2x3(int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m1x3(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m2x3(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m1x3(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m2x3(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m1x3(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m2x3(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u64m1x3(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u64m2x3(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t vm, float16_t *rs1, 
vuint8mf8_t vs2, vfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t vm, 
int8_t *rs1, vuint8mf4_t vs2, vint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x3_t vs3, size_t vl) { + 
return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, 
vs3, vl); } -void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { - return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg3ei8\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg4ei16.c b/auto-generated/gnu-overloaded-tests/vsuxseg4ei16.c index 4b3256841..6c174ed61 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg4ei16.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg4ei16.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg4ei16_v_f16mf4x4(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf4x4(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16mf2x4(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf2x4(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m1x4(float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m1x4(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t 
vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m2x4(float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m2x4(float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32mf2x4(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32mf2x4(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m1x4(float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m1x4(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m2x4(float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m2x4(float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m1x4(float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m1x4(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m2x4(float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m2x4(float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf8x4(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf4x4(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf2x4(int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m1x4(int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m2x4(int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void 
test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf4x4(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf2x4(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m1x4(int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m2x4(int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32mf2x4(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m1x4(int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m2x4(int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i64m1x4(int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i64m2x4(int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8m1x4(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8m2x4(uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16m1x4(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16m2x4(uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32m1x4(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32m2x4(uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u64m1x4(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); +void 
test_vsuxseg4ei16_v_u64m2x4(uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint16m2_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint16m1_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint16mf2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - 
return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint16m4_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint16m2_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint16m1_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint16mf2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint16m4_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - 
return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint16m2_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint16m1_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint16mf2_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei16\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg4ei32.c b/auto-generated/gnu-overloaded-tests/vsuxseg4ei32.c index 7bbf953e6..d16fb6ac8 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg4ei32.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg4ei32.c @@ -6,300 +6,300 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg4ei32_v_f16mf4x4(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16mf4x4(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return 
__riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f16mf2x4(float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16mf2x4(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f16m1x4(float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16m1x4(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f16m2x4(float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16m2x4(float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f32mf2x4(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f32mf2x4(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f32m1x4(float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f32m1x4(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f32m2x4(float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f32m2x4(float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f64m1x4(float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f64m1x4(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f64m2x4(float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f64m2x4(float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8mf8x4(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8mf4x4(int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8mf2x4(int8_t *rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void 
test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8m1x4(int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8m2x4(int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16mf4x4(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16mf2x4(int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16m1x4(int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16m2x4(int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i32mf2x4(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i32m1x4(int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i32m2x4(int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i64m1x4(int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i64m2x4(int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, 
bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8m1x4(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8m2x4(uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16m1x4(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16m2x4(uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u32m1x4(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u32m2x4(uint32_t *rs1, vuint32m2_t vs2, 
vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u64m1x4(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u64m2x4(uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint32m4_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint32m2_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x4_t 
vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint32m1_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint32m8_t vs2, vint8m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint32m4_t vs2, vint16m2x4_t vs3, size_t vl) { 
+ return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint32m2_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint32m1_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint32m8_t vs2, vuint8m2x4_t vs3, size_t vl) { + 
return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint32m4_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint32m2_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint32m1_t vs2, vuint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei32\.[ivxfswum.]+\s+} 74 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg4ei64.c 
b/auto-generated/gnu-overloaded-tests/vsuxseg4ei64.c index 4c898ebe5..528739278 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg4ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg4ei64.c @@ -6,284 +6,284 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg4ei64_v_f16mf4x4(float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f16mf4x4(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f16mf2x4(float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f16mf2x4(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f16m1x4(float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f16m1x4(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f16m2x4(float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f16m2x4(float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f32mf2x4(float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f32mf2x4(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f32m1x4(float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f32m1x4(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f32m2x4(float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f32m2x4(float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f64m1x4(float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f64m1x4(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_f64m2x4(float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_f64m2x4(float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i8mf8x4(int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, 
vint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i8mf4x4(int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i8mf2x4(int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i8m1x4(int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i16mf4x4(int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i16mf2x4(int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i16m1x4(int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i16m2x4(int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i32mf2x4(int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i32m1x4(int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i32m2x4(int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i64m1x4(int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_i64m2x4(int64_t *rs1, 
vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u8m1x4(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u16m1x4(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u16m2x4(uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u32m1x4(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); +void test_vsuxseg4ei64_v_u32m2x4(uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl); } 
-void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m1x4(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m2x4(uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint64m8_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint64m4_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint64m2_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint64m8_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint64m4_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint64m2_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint64m8_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint64m4_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint64m2_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei64\.[ivxfswum.]+\s+} 70 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg4ei8.c b/auto-generated/gnu-overloaded-tests/vsuxseg4ei8.c
index 370aa8560..4ea03d9e7 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg4ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg4ei8.c
@@ -6,300 +6,300 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg4ei8_v_f16mf4x4(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16mf4x4(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f16mf2x4(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16mf2x4(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f16m1x4(float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16m1x4(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f16m2x4(float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16m2x4(float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f32mf2x4(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32mf2x4(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f32m1x4(float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32m1x4(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f32m2x4(float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32m2x4(float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f64m1x4(float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f64m1x4(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f64m2x4(float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f64m2x4(float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf8x4(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf4x4(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf2x4(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8m1x4(int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8m2x4(int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16mf4x4(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16mf2x4(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16m1x4(int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16m2x4(int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32mf2x4(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32m1x4(int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32m2x4(int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i64m1x4(int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i64m2x4(int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8m1x4(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8m2x4(uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16m1x4(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16m2x4(uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32m1x4(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32m2x4(uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u64m1x4(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u64m2x4(uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t vm, float16_t *rs1, vuint8m1_t vs2, vfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t vm, float32_t *rs1, vuint8mf2_t vs2, vfloat32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t vm, float64_t *rs1, vuint8mf4_t vs2, vfloat64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t vm, int8_t *rs1, vuint8m2_t vs2, vint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t vm, int16_t *rs1, vuint8m1_t vs2, vint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t vm, int32_t *rs1, vuint8mf2_t vs2, vint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t vm, int64_t *rs1, vuint8mf4_t vs2, vint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t vm, uint8_t *rs1, vuint8m2_t vs2, vuint8m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t vm, uint16_t *rs1, vuint8m1_t vs2, vuint16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t vm, uint32_t *rs1, vuint8mf2_t vs2, vuint32m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t vm, uint64_t *rs1, vuint8mf4_t vs2, vuint64m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg4ei8\.[ivxfswum.]+\s+} 74 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg5ei16.c b/auto-generated/gnu-overloaded-tests/vsuxseg5ei16.c
index 0a08f4b44..7c0883e1b 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg5ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg5ei16.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg5ei16_v_f16mf4x5(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f16mf4x5(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg5ei16_v_f16mf2x5(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f16mf2x5(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f16m1x5(float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f16m1x5(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f32mf2x5(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f32mf2x5(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f32m1x5(float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f32m1x5(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f64m1x5(float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f64m1x5(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8mf8x5(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8mf4x5(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8mf2x5(int8_t *rs1, vuint16m1_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8m1x5(int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i16mf4x5(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i16mf2x5(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i16m1x5(int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i32mf2x5(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i32m1x5(int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i64m1x5(int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8m1x5(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u16m1x5(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u32m1x5(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u64m1x5(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg5ei32.c b/auto-generated/gnu-overloaded-tests/vsuxseg5ei32.c
index cd31b3286..99cead932 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg5ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg5ei32.c
@@ -6,212 +6,212 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg5ei32_v_f16mf4x5(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f16mf4x5(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f16mf2x5(float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f16mf2x5(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f16m1x5(float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f16m1x5(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f32mf2x5(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f32mf2x5(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f32m1x5(float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f32m1x5(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f64m1x5(float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f64m1x5(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8mf8x5(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8mf4x5(int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8mf2x5(int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8m1x5(int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i16mf4x5(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i16mf2x5(int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i16m1x5(int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i32mf2x5(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i32m1x5(int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i64m1x5(int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8m1x5(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u16m1x5(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u32m1x5(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u64m1x5(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg5ei64.c 
b/auto-generated/gnu-overloaded-tests/vsuxseg5ei64.c index f52a98f1a..cd52d726d 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg5ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg5ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg5ei64_v_f16mf4x5(float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf4x5(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16mf2x5(float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf2x5(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16m1x5(float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16m1x5(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32mf2x5(float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32mf2x5(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32m1x5(float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32m1x5(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f64m1x5(float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f64m1x5(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf8x5(int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf4x5(int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf2x5(int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8m1x5(int8_t *rs1, vuint64m8_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { 
- return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf4x5(int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf2x5(int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16m1x5(int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32mf2x5(int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32m1x5(int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i64m1x5(int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8m1x5(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *rs1, 
vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16m1x5(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32m1x5(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u64m1x5(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei64(vm, rs1, 
vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg5ei8.c b/auto-generated/gnu-overloaded-tests/vsuxseg5ei8.c index f3ce3757d..d31ab854f 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg5ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg5ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg5ei8_v_f16mf4x5(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf4x5(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16mf2x5(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf2x5(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16m1x5(float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16m1x5(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32mf2x5(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32mf2x5(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32m1x5(float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32m1x5(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f64m1x5(float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f64m1x5(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf8x5(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf4x5(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf2x5(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8m1x5(int8_t *rs1, vuint8m1_t 
vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf4x5(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf2x5(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16m1x5(int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32mf2x5(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32m1x5(int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i64m1x5(int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8m1x5(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, 
vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16m1x5(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32m1x5(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u64m1x5(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t 
vs2, vfloat64m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x5_t vs3, size_t vl) { + return 
__riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x5_t vs3, size_t vl) { + return 
__riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg5ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg6ei16.c b/auto-generated/gnu-overloaded-tests/vsuxseg6ei16.c index fe32b7b63..ef4b85e83 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg6ei16.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg6ei16.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg6ei16_v_f16mf4x6(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf4x6(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16mf2x6(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf2x6(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16m1x6(float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16m1x6(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32mf2x6(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32mf2x6(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32m1x6(float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32m1x6(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f64m1x6(float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f64m1x6(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf8x6(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf4x6(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8mf2x6(int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i8m1x6(int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16mf4x6(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16mf2x6(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i16m1x6(int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i32mf2x6(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i32m1x6(int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_i64m1x6(int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u8m1x6(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *rs1, vuint16mf4_t 
vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u16m1x6(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u32m1x6(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_u64m1x6(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t 
mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg6ei32.c b/auto-generated/gnu-overloaded-tests/vsuxseg6ei32.c
index 98f126052..f40abc4c6 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg6ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg6ei32.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg6ei32_v_f16mf4x6(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f16mf4x6(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f16mf2x6(float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f16mf2x6(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f16m1x6(float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f16m1x6(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f32mf2x6(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f32mf2x6(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f32m1x6(float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f32m1x6(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_f64m1x6(float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_f64m1x6(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i8mf8x6(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_i8mf4x6(int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t
v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8mf2x6(int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8m1x6(int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i16mf4x6(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i16mf2x6(int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i16m1x6(int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i32mf2x6(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i32m1x6(int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i64m1x6(int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8m1x6(uint8_t *rs1, 
vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u16m1x6(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u32m1x6(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u64m1x6(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, 
vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t 
bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, 
uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg6ei64.c b/auto-generated/gnu-overloaded-tests/vsuxseg6ei64.c
index a98e4ba79..c25509ff4 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg6ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg6ei64.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg6ei64_v_f16mf4x6(float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16mf4x6(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f16mf2x6(float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16mf2x6(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f16m1x6(float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f16m1x6(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f32mf2x6(float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f32mf2x6(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f32m1x6(float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f32m1x6(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_f64m1x6(float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_f64m1x6(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_i8mf8x6(int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(base,
bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i8mf4x6(int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i8mf2x6(int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i8m1x6(int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i16mf4x6(int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i16mf2x6(int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i16m1x6(int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i32mf2x6(int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i32m1x6(int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i64m1x6(int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return 
__riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8m1x6(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u16m1x6(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u32m1x6(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u64m1x6(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void 
test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); 
+void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, 
v_tuple, vl);
+void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg6ei8.c b/auto-generated/gnu-overloaded-tests/vsuxseg6ei8.c
index a7ce4815f..73c9ab008 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg6ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg6ei8.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg6ei8_v_f16mf4x6(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16mf4x6(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f16mf2x6(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16mf2x6(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f16m1x6(float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f16m1x6(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f32mf2x6(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f32mf2x6(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f32m1x6(float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f32m1x6(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_f64m1x6(float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_f64m1x6(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_i8mf8x6(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t
vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i8mf4x6(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i8mf2x6(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i8m1x6(int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i16mf4x6(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i16mf2x6(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i16m1x6(int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i32mf2x6(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i32m1x6(int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i64m1x6(int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8m1x6(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u16m1x6(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u32m1x6(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u64m1x6(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, 
vfloat32mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { 
- return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl);
 }
-void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
- return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg6ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg7ei16.c b/auto-generated/gnu-overloaded-tests/vsuxseg7ei16.c
index 099950342..4faa525dd 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg7ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg7ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-void test_vsuxseg7ei16_v_f16mf4x7(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f16mf4x7(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f16mf2x7(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f16mf2x7(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f16m1x7(float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f16m1x7(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f32mf2x7(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f32mf2x7(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f32m1x7(float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f32m1x7(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_f64m1x7(float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg7ei16_v_f64m1x7(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl);
 }
-void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
+void
test_vsuxseg7ei16_v_i8mf8x7(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf4x7(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf2x7(int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8m1x7(int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16mf4x7(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16mf2x7(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16m1x7(int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i32mf2x7(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i32m1x7(int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i64m1x7(int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return 
__riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8m1x7(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16m1x7(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32m1x7(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u64m1x7(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t vm, float16_t 
*rs1, vuint16m1_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void 
test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, 
bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei16\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg7ei32.c b/auto-generated/gnu-overloaded-tests/vsuxseg7ei32.c index db352744a..871609f57 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg7ei32.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg7ei32.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg7ei32_v_f16mf4x7(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf4x7(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16mf2x7(float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf2x7(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16m1x7(float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16m1x7(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32mf2x7(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32mf2x7(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32m1x7(float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32m1x7(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f64m1x7(float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, 
bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f64m1x7(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf8x7(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf4x7(int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf2x7(int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8m1x7(int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf4x7(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf2x7(int16_t *rs1, vuint32m1_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16m1x7(int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32mf2x7(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32m1x7(int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i64m1x7(int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return 
__riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8m1x7(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16m1x7(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u32m1x7(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u64m1x7(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x7_t vs3, size_t vl) { 
+ return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, 
vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, 
vuint32m1_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei32\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg7ei64.c b/auto-generated/gnu-overloaded-tests/vsuxseg7ei64.c index db0e4ca9e..fddfb74fb 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg7ei64.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg7ei64.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg7ei64_v_f16mf4x7(float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf4x7(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16mf2x7(float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf2x7(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16m1x7(float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16m1x7(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32mf2x7(float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f32mf2x7(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32m1x7(float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void 
test_vsuxseg7ei64_v_f32m1x7(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f64m1x7(float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f64m1x7(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf8x7(int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf4x7(int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf2x7(int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8m1x7(int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf4x7(int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf2x7(int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16m1x7(int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i32mf2x7(int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i32m1x7(int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i64m1x7(int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, 
vs3, vl); } -void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8m1x7(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16m1x7(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32m1x7(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u64m1x7(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void 
test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, 
vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, 
rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei64\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg7ei8.c b/auto-generated/gnu-overloaded-tests/vsuxseg7ei8.c index d10cb3c43..69429ff8e 100644 --- a/auto-generated/gnu-overloaded-tests/vsuxseg7ei8.c +++ b/auto-generated/gnu-overloaded-tests/vsuxseg7ei8.c @@ -6,212 +6,212 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -void test_vsuxseg7ei8_v_f16mf4x7(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f16mf4x7(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f16mf2x7(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f16mf2x7(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f16m1x7(float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f16m1x7(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f32mf2x7(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f32mf2x7(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) { 
+ return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f32m1x7(float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f32m1x7(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f64m1x7(float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f64m1x7(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i8mf8x7(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i8mf4x7(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i8mf2x7(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i8m1x7(int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i16mf4x7(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i16mf2x7(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i16m1x7(int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i32mf2x7(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i32m1x7(int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_i64m1x7(int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u8m1x7(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u16m1x7(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u32m1x7(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_u64m1x7(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t vm, float16_t *rs1, 
vuint8mf8_t vs2, vfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg7ei8\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg8ei16.c b/auto-generated/gnu-overloaded-tests/vsuxseg8ei16.c
index 7cb30c042..53ed47145 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg8ei16.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg8ei16.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg8ei16_v_f16mf4x8(float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf4x8(float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16mf2x8(float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf2x8(float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16m1x8(float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16m1x8(float16_t *rs1, vuint16m1_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32mf2x8(float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32mf2x8(float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32m1x8(float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32m1x8(float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f64m1x8(float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f64m1x8(float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf8x8(int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf4x8(int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf2x8(int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8m1x8(int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf4x8(int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf2x8(int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16m1x8(int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32mf2x8(int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32m1x8(int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i64m1x8(int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8m1x8(uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16m1x8(uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32m1x8(uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u64m1x8(uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint16mf4_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint16mf2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint16m1_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint16mf4_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint16mf2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint16mf4_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint16mf4_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint16mf2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint16m1_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint16m2_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint16mf4_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint16mf2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint16m1_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint16mf4_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint16mf2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint16mf4_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint16mf4_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint16mf2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint16m1_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint16m2_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint16mf4_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint16mf2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint16m1_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint16mf4_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint16mf2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint16mf4_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei16\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg8ei32.c b/auto-generated/gnu-overloaded-tests/vsuxseg8ei32.c
index 74f6ab1ea..d16f017c5 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg8ei32.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg8ei32.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg8ei32_v_f16mf4x8(float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf4x8(float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16mf2x8(float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf2x8(float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16m1x8(float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16m1x8(float16_t *rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32mf2x8(float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32mf2x8(float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32m1x8(float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32m1x8(float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f64m1x8(float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f64m1x8(float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf8x8(int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf4x8(int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf2x8(int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8m1x8(int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf4x8(int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf2x8(int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16m1x8(int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32mf2x8(int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32m1x8(int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i64m1x8(int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8m1x8(uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16m1x8(uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32m1x8(uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u64m1x8(uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint32mf2_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint32m1_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint32m2_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint32mf2_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint32m1_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint32mf2_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint32mf2_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint32m1_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint32m2_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint32m4_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint32mf2_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint32m1_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint32m2_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint32mf2_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint32m1_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint32mf2_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint32mf2_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint32m1_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint32m2_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint32m4_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint32mf2_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint32m1_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint32m2_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint32mf2_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint32m1_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint32mf2_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei32\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg8ei64.c b/auto-generated/gnu-overloaded-tests/vsuxseg8ei64.c
index d956dbe7c..8275a7b62 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg8ei64.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg8ei64.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg8ei64_v_f16mf4x8(float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16mf4x8(float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f16mf2x8(float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16mf2x8(float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f16m1x8(float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16m1x8(float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f32mf2x8(float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f32mf2x8(float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f32m1x8(float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f32m1x8(float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f64m1x8(float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f64m1x8(float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf8x8(int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf4x8(int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf2x8(int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8m1x8(int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf4x8(int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf2x8(int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16m1x8(int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32mf2x8(int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32m1x8(int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i64m1x8(int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8m1x8(uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16m1x8(uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32m1x8(uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u64m1x8(uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint64m1_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint64m2_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint64m4_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint64m1_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint64m2_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint64m1_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint64m1_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint64m2_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint64m4_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint64m8_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint64m1_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint64m2_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint64m4_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint64m1_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint64m2_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint64m1_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint64m1_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint64m2_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint64m4_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint64m8_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint64m1_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint64m2_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint64m4_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint64m1_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint64m2_t vs2, vuint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl);
+void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint64m1_t vs2, vuint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei64\.[ivxfswum.]+\s+} 52 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vsuxseg8ei8.c b/auto-generated/gnu-overloaded-tests/vsuxseg8ei8.c
index b771d3054..5107cecbc 100644
--- a/auto-generated/gnu-overloaded-tests/vsuxseg8ei8.c
+++ b/auto-generated/gnu-overloaded-tests/vsuxseg8ei8.c
@@ -6,212 +6,212 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-void test_vsuxseg8ei8_v_f16mf4x8(float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16mf4x8(float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_f16mf2x8(float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16mf2x8(float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_f16m1x8(float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f16m1x8(float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_f32mf2x8(float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f32mf2x8(float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_f32m1x8(float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f32m1x8(float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_f64m1x8(float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_f64m1x8(float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf8x8(int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf4x8(int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8mf2x8(int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i8m1x8(int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16mf4x8(int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16mf2x8(int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i16m1x8(int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i32mf2x8(int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i32m1x8(int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_i64m1x8(int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u8m1x8(uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u16m1x8(uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
+void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
 }

-void
test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u32m1x8(uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u64m1x8(uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t vm, float16_t *rs1, vuint8mf8_t vs2, vfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t vm, float16_t *rs1, vuint8mf4_t vs2, vfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t vm, float16_t *rs1, vuint8mf2_t vs2, vfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t vm, float32_t *rs1, vuint8mf8_t vs2, vfloat32mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t vm, float32_t *rs1, vuint8mf4_t vs2, vfloat32m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t vm, float64_t *rs1, vuint8mf8_t vs2, vfloat64m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t vm, int8_t *rs1, vuint8mf8_t vs2, vint8mf8x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t vm, int8_t *rs1, vuint8mf4_t vs2, vint8mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, 
vint8mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t vm, int8_t *rs1, vuint8mf2_t vs2, vint8mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t vm, int8_t *rs1, vuint8m1_t vs2, vint8m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t vm, int16_t *rs1, vuint8mf8_t vs2, vint16mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t vm, int16_t *rs1, vuint8mf4_t vs2, vint16mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t vm, int16_t *rs1, vuint8mf2_t vs2, vint16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t vm, int32_t *rs1, vuint8mf8_t vs2, vint32mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t vm, int32_t *rs1, vuint8mf4_t vs2, vint32m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t vm, int64_t *rs1, vuint8mf8_t vs2, vint64m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t vm, uint8_t *rs1, vuint8mf8_t vs2, vuint8mf8x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t vm, uint8_t *rs1, vuint8mf4_t vs2, vuint8mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t vm, uint8_t *rs1, vuint8mf2_t vs2, vuint8mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t vm, uint8_t *rs1, vuint8m1_t vs2, vuint8m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t vm, uint16_t *rs1, vuint8mf8_t vs2, vuint16mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t vm, uint16_t *rs1, vuint8mf4_t vs2, vuint16mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t vm, uint16_t *rs1, vuint8mf2_t vs2, vuint16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t vm, uint32_t *rs1, vuint8mf8_t vs2, vuint32mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t vm, uint32_t *rs1, vuint8mf4_t vs2, vuint32m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } -void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); +void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t vm, uint64_t *rs1, vuint8mf8_t vs2, vuint64m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsuxseg8ei8\.[ivxfswum.]+\s+} 52 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwadd.c b/auto-generated/gnu-overloaded-tests/vwadd.c index 38bfcf62a..ca4904747 100644 --- a/auto-generated/gnu-overloaded-tests/vwadd.c +++ b/auto-generated/gnu-overloaded-tests/vwadd.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); 
+vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint16m1_t test_vwadd_wv_i16m1(vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint16m1_t test_vwadd_wx_i16m1(vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint16m2_t test_vwadd_vv_i16m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint16m2_t test_vwadd_vx_i16m2(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint16m2_t test_vwadd_wv_i16m2(vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint16m2_t test_vwadd_wx_i16m2(vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint16m4_t 
test_vwadd_vv_i16m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint16m4_t test_vwadd_vx_i16m4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint16m4_t test_vwadd_wv_i16m4(vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint16m4_t test_vwadd_wx_i16m4(vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint16m8_t test_vwadd_vv_i16m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint16m8_t test_vwadd_vx_i16m8(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint16m8_t test_vwadd_wv_i16m8(vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint16m8_t test_vwadd_wx_i16m8(vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint32m1_t test_vwadd_wv_i32m1(vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint32m1_t 
test_vwadd_wx_i32m1(vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint32m2_t test_vwadd_vv_i32m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint32m2_t test_vwadd_vx_i32m2(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint32m2_t test_vwadd_wv_i32m2(vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint32m2_t test_vwadd_wx_i32m2(vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint32m4_t test_vwadd_vv_i32m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint32m4_t test_vwadd_vx_i32m4(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint32m4_t test_vwadd_wv_i32m4(vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint32m4_t test_vwadd_wx_i32m4(vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint32m8_t test_vwadd_vv_i32m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint32m8_t test_vwadd_vx_i32m8(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint32m8_t test_vwadd_wv_i32m8(vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint32m8_t test_vwadd_wx_i32m8(vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint64m1_t 
test_vwadd_wv_i64m1(vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint64m1_t test_vwadd_wx_i64m1(vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint64m2_t test_vwadd_vv_i64m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint64m2_t test_vwadd_vx_i64m2(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint64m2_t test_vwadd_wv_i64m2(vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint64m2_t test_vwadd_wx_i64m2(vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint64m4_t test_vwadd_vv_i64m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint64m4_t test_vwadd_vx_i64m4(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint64m4_t test_vwadd_wv_i64m4(vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint64m4_t test_vwadd_wx_i64m4(vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_vv(op1, op2, vl); +vint64m8_t test_vwadd_vv_i64m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(op1, op2, vl); +vint64m8_t test_vwadd_vx_i64m8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_wv(op1, op2, vl); +vint64m8_t test_vwadd_wv_i64m8(vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(op1, op2, vl); +vint64m8_t test_vwadd_wx_i64m8(vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vs2, rs1, vl); } -vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return 
__riscv_vwadd_vx(mask, op1, op2, vl); +vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint16m2_t 
test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return 
__riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint32m4_t 
test_vwadd_wx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, 
size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_vv(mask, op1, op2, vl); +vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_vv(vm, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx(mask, op1, op2, vl); +vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx(vm, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_wv(mask, op1, op2, vl); +vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_wv(vm, vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx(mask, op1, op2, vl); +vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwaddu.c b/auto-generated/gnu-overloaded-tests/vwaddu.c index a1a40a126..4384a9db3 100644 --- a/auto-generated/gnu-overloaded-tests/vwaddu.c +++ b/auto-generated/gnu-overloaded-tests/vwaddu.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint16mf4_t 
test_vwaddu_wx_u16mf4(vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint16m4_t 
test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, 
vl); +vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return 
__riscv_vwaddu_wv(vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_vv(op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_wv(op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint16mf4_t 
test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, 
size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t 
vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, 
rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, 
vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_vv(mask, op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv(vm, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx(mask, op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx(vm, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_wv(mask, op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv(vm, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx(mask, op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add[u]?\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwcvt.c b/auto-generated/gnu-overloaded-tests/vwcvt.c index bb4d795c2..bd6dd4287 100644 --- a/auto-generated/gnu-overloaded-tests/vwcvt.c +++ b/auto-generated/gnu-overloaded-tests/vwcvt.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint16mf4_t test_vwcvt_x_x_v_i16mf4(vint8mf8_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint16mf2_t 
test_vwcvt_x_x_v_i16mf2(vint8mf4_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2(vint8mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1(vint8mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2(vint8m1_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4(vint8m2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8(vint8m4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2(vint16mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1(vint16mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint32m2_t test_vwcvt_x_x_v_i32m2(vint16m1_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint32m4_t test_vwcvt_x_x_v_i32m4(vint16m2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint32m8_t test_vwcvt_x_x_v_i32m8(vint16m4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint64m1_t test_vwcvt_x_x_v_i64m1(vint32mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint64m2_t test_vwcvt_x_x_v_i64m2(vint32m1_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4(vint32m2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t src, size_t vl) { - return __riscv_vwcvt_x(src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8(vint32m4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vs2, vl); } -vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t mask, vint8mf8_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint16mf4_t test_vwcvt_x_x_v_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t mask, vint8mf4_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t mask, vint8mf2_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint16m2_t 
test_vwcvt_x_x_v_i16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2_m(vbool8_t vm, vint8m1_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4_m(vbool4_t vm, vint8m2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8_m(vbool2_t vm, vint8m4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t mask, vint16mf4_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t mask, vint16mf2_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint32m2_t test_vwcvt_x_x_v_i32m2_m(vbool16_t vm, vint16m1_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint32m4_t test_vwcvt_x_x_v_i32m4_m(vbool8_t vm, vint16m2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint32m8_t test_vwcvt_x_x_v_i32m8_m(vbool4_t vm, vint16m4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t mask, vint32mf2_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint64m1_t test_vwcvt_x_x_v_i64m1_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint64m2_t test_vwcvt_x_x_v_i64m2_m(vbool32_t vm, vint32m1_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4_m(vbool16_t vm, vint32m2_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vwcvt_x(mask, src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8_m(vbool8_t vm, vint32m4_t vs2, size_t vl) { + return __riscv_vwcvt_x(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvt\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwcvtu.c b/auto-generated/gnu-overloaded-tests/vwcvtu.c index 2ff219e1d..97c8a06a9 100644 --- a/auto-generated/gnu-overloaded-tests/vwcvtu.c +++ b/auto-generated/gnu-overloaded-tests/vwcvtu.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4(vuint8mf8_t 
vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1(vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2(vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4(vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8(vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2(vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1(vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2(vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4(vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8(vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1(vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2(vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4(vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x(src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8(vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t mask, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t mask, vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t mask, vuint8mf2_t 
src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t mask, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t mask, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t mask, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x(mask, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvtu\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwmacc.c b/auto-generated/gnu-overloaded-tests/vwmacc.c index 68a41d50f..2e0e99391 100644 --- a/auto-generated/gnu-overloaded-tests/vwmacc.c +++ 
b/auto-generated/gnu-overloaded-tests/vwmacc.c @@ -126,124 +126,124 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size return __riscv_vwmacc(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t vm, vint16m8_t 
vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, 
rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmacc\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwmaccsu.c b/auto-generated/gnu-overloaded-tests/vwmaccsu.c index ab408d5ce..68b8b0641 100644 --- a/auto-generated/gnu-overloaded-tests/vwmaccsu.c +++ b/auto-generated/gnu-overloaded-tests/vwmaccsu.c @@ -126,124 +126,124 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, s return __riscv_vwmaccsu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return 
__riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return 
__riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, int32_t rs1, 
vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccsu\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwmaccu.c b/auto-generated/gnu-overloaded-tests/vwmaccu.c index 894cb4a3e..07ca31cea 100644 --- a/auto-generated/gnu-overloaded-tests/vwmaccu.c +++ b/auto-generated/gnu-overloaded-tests/vwmaccu.c @@ -126,124 +126,124 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, return __riscv_vwmaccu(vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, 
vs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return 
+vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vwmaccu(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vwmaccu(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccu\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vwmaccus.c b/auto-generated/gnu-overloaded-tests/vwmaccus.c
index 9a735b3ca..18f9e3e54 100644
--- a/auto-generated/gnu-overloaded-tests/vwmaccus.c
+++ b/auto-generated/gnu-overloaded-tests/vwmaccus.c
@@ -66,64 +66,64 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, s
   return __riscv_vwmaccus(vd, rs1, vs2, vl);
 }
 
-vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
-vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vwmaccus(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwmaccus(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccus\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vwmul.c b/auto-generated/gnu-overloaded-tests/vwmul.c
index 2fe4130ba..93118e7b4 100644
--- a/auto-generated/gnu-overloaded-tests/vwmul.c
+++ b/auto-generated/gnu-overloaded-tests/vwmul.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m2_t test_vwmul_vv_i16m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m2_t test_vwmul_vx_i16m2(vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m4_t test_vwmul_vv_i16m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m4_t test_vwmul_vx_i16m4(vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m8_t test_vwmul_vv_i16m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint16m8_t test_vwmul_vx_i16m8(vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m2_t test_vwmul_vv_i32m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m2_t test_vwmul_vx_i32m2(vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m4_t test_vwmul_vv_i32m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m4_t test_vwmul_vx_i32m4(vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m8_t test_vwmul_vv_i32m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint32m8_t test_vwmul_vx_i32m8(vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m2_t test_vwmul_vv_i64m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m2_t test_vwmul_vx_i64m2(vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m4_t test_vwmul_vv_i64m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m4_t test_vwmul_vx_i64m4(vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m8_t test_vwmul_vv_i64m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwmul(vs2, vs1, vl);
 }
 
-vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(op1, op2, vl);
+vint64m8_t test_vwmul_vx_i64m8(vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) {
  + return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
  + return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) {
  + return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
-vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul(mask, op1, op2, vl);
+vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmul\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vwmulsu.c b/auto-generated/gnu-overloaded-tests/vwmulsu.c
index 75434a155..c1fd8d0a9 100644
--- a/auto-generated/gnu-overloaded-tests/vwmulsu.c
+++ b/auto-generated/gnu-overloaded-tests/vwmulsu.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, vs1, vl);
 }
 
-vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(op1, op2, vl);
+vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vs2, rs1, vl);
 }
 
-vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
-vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu(mask, op1, op2, vl);
+vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulsu\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vwmulu.c b/auto-generated/gnu-overloaded-tests/vwmulu.c
index 3c2e8769e..a77dea88c 100644
--- a/auto-generated/gnu-overloaded-tests/vwmulu.c
+++ b/auto-generated/gnu-overloaded-tests/vwmulu.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vs2, vs1, vl);
 }
 
-vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(op1, op2, vl);
+vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu(mask, op1, op2, vl);
+vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu(vm, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulu\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vwredsum.c b/auto-generated/gnu-overloaded-tests/vwredsum.c
index 5ca3a0e68..f87c7132c 100644
--- a/auto-generated/gnu-overloaded-tests/vwredsum.c
+++ b/auto-generated/gnu-overloaded-tests/vwredsum.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsum(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsum(vs2, vs1, vl);
 }
 
-vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) {
size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(vector, scalar, vl); 
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t vm, vint8mf8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t vm, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t vm, vint8m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t vm, vint8m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t vm, vint8m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t vm, vint8m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t vm, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t vm, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t vm, vint16m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint32m1_t 
test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t vm, vint16m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t vm, vint16m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t vm, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t vm, vint32m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t vm, vint32m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t vm, vint32m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsum\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwredsumu.c b/auto-generated/gnu-overloaded-tests/vwredsumu.c index 654ba1165..f1365c7b3 100644 --- a/auto-generated/gnu-overloaded-tests/vwredsumu.c +++ b/auto-generated/gnu-overloaded-tests/vwredsumu.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint16m1_t 
test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) { + 
return __riscv_vwredsumu(vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t vm, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t vm, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t vm, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t vm, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t vm, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t vm, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t vm, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t 
vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t vm, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t vm, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t vm, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t vm, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t vm, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t vm, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t vm, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t vm, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu(vm, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsumu\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwsub.c b/auto-generated/gnu-overloaded-tests/vwsub.c index 644737a41..d20780bc0 100644 --- a/auto-generated/gnu-overloaded-tests/vwsub.c +++ b/auto-generated/gnu-overloaded-tests/vwsub.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } 
-vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1(vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1(vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2(vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2(vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint16m4_t 
test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4(vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4(vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8(vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8(vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1(vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint32m1_t 
test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1(vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2(vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2(vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4(vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4(vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8(vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8(vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint64m1_t 
test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1(vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1(vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2(vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2(vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4(vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4(vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv(op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_wv(op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8(vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8(vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx(vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, 
vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint16m2_t 
test_vwsub_vx_i16m2_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return 
__riscv_vwsub_wv(mask, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint32m4_t 
test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int32_t rs1, 
size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv(mask, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv(vm, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx(mask, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx(vm, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_wv(mask, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv(vm, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx(mask, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vwsubu.c b/auto-generated/gnu-overloaded-tests/vwsubu.c index 45aeaa36c..47e2e323e 100644 --- a/auto-generated/gnu-overloaded-tests/vwsubu.c +++ b/auto-generated/gnu-overloaded-tests/vwsubu.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint16mf4_t 
test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint16m4_t 
test_vwsubu_vx_u16m4(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); 
} -vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, 
op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv(op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx(op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv(op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx(op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, 
size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } 
-vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, 
op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t vm, 
vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv(vm, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx(mask, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx(vm, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv(mask, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv(vm, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx(mask, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx(vm, vs2, rs1, vl); 
}
-vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return __riscv_vwsubu_wv(mask, op1, op2, vl);
+vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv(vm, vs2, vs1, vl);
}
-vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx(mask, op1, op2, vl);
+vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx(vm, vs2, rs1, vl);
}
-vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return __riscv_vwsubu_vv(mask, op1, op2, vl);
+vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv(vm, vs2, vs1, vl);
}
-vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx(mask, op1, op2, vl);
+vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx(vm, vs2, rs1, vl);
}
-vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return __riscv_vwsubu_wv(mask, op1, op2, vl);
+vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv(vm, vs2, vs1, vl);
}
-vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx(mask, op1, op2, vl);
+vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx(vm, vs2, rs1, vl);
}
-vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return __riscv_vwsubu_vv(mask, op1, op2, vl);
+vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv(vm, vs2, vs1, vl);
}
-vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx(mask, op1, op2, vl);
+vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx(vm, vs2, rs1, vl);
}
-vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return __riscv_vwsubu_wv(mask, op1, op2, vl);
+vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv(vm, vs2, vs1, vl);
}
-vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx(mask, op1, op2, vl);
+vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx(vm, vs2, rs1, vl);
}
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub[u]?\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/gnu-overloaded-tests/vxor.c b/auto-generated/gnu-overloaded-tests/vxor.c
index b9d8935c9..20ce99f01 100644
--- a/auto-generated/gnu-overloaded-tests/vxor.c
+++ b/auto-generated/gnu-overloaded-tests/vxor.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vxor(op1, op2, vl);
+vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1,
size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1(vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2(vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4(vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8(vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, 
size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1(vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2(vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4(vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8(vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1(vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2(vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, 
vl); +vint32m4_t test_vxor_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4(vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8(vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1(vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2(vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4(vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8(vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint8mf2_t 
test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m1_t 
test_vxor_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return 
__riscv_vxor(vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor(vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) { - return 
__riscv_vxor(mask, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_m(vbool8_t vm, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_m(vbool4_t vm, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_m(vbool2_t vm, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_m(vbool1_t vm, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t vm, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t vm, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t 
vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_m(vbool16_t vm, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_m(vbool8_t vm, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_m(vbool4_t vm, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_m(vbool2_t vm, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t vm, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_m(vbool32_t vm, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_m(vbool16_t vm, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); 
} -vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_m(vbool8_t vm, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_m(vbool4_t vm, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1_m(vbool64_t vm, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_m(vbool32_t vm, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_m(vbool16_t vm, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_m(vbool8_t vm, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, 
vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return 
__riscv_vxor(mask, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, 
vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint64m4_t 
test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor(vm, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor(mask, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor(vm, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vxor\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vzext_vf2.c b/auto-generated/gnu-overloaded-tests/vzext_vf2.c index 9e812f401..df75a8149 100644 --- a/auto-generated/gnu-overloaded-tests/vzext_vf2.c +++ b/auto-generated/gnu-overloaded-tests/vzext_vf2.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); 
} -vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2(op1, vl); +vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t 
mask, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2(mask, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf2[ivxfswum.]*\s+} 30 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vzext_vf4.c b/auto-generated/gnu-overloaded-tests/vzext_vf4.c index f93adddd3..2f40fb49a 100644 --- a/auto-generated/gnu-overloaded-tests/vzext_vf4.c +++ b/auto-generated/gnu-overloaded-tests/vzext_vf4.c @@ -6,76 +6,76 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); +vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vs2, vl); } -vuint32mf2_t 
test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf4[ivxfswum.]*\s+} 18 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vzext_vf8.c b/auto-generated/gnu-overloaded-tests/vzext_vf8.c index 26bc61f7d..36dbee7e1 100644 --- a/auto-generated/gnu-overloaded-tests/vzext_vf8.c +++ b/auto-generated/gnu-overloaded-tests/vzext_vf8.c @@ -6,36 +6,36 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); +vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8(vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); +vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8(vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); +vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8(vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); +vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8(vs2, vl); } -vuint64m1_t 
test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8(vm, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8(vm, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8(vm, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf8[ivxfswum.]*\s+} 8 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vaadd.c b/auto-generated/policy_funcs/gnu-api-tests/vaadd.c index b4a16b41a..65b17b772 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vaadd.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vaadd.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf2_tu(vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return 
__riscv_vaadd_vv_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32mf2_tu(maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t 
vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t 
test_vaadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vaadd_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return 
__riscv_vaadd_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vaadd_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m2_tum(vm, vd, vs2, 
vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t 
op2, size_t vl) { - return __riscv_vaadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t 
maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t 
test_vaadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t 
test_vaadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t mask, 
vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t 
test_vaadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t vm, 
vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t 
vl) { + return __riscv_vaadd_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_vx_i32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, 
vl); } -vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaadd\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vaaddu.c b/auto-generated/policy_funcs/gnu-api-tests/vaaddu.c index 7c96dae2d..e7fcd7b89 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vaaddu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vaaddu.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return 
__riscv_vaaddu_vv_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return 
__riscv_vaaddu_vv_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, 
vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_vx_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_vx_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, 
vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, 
vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vaaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_vx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vaaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vaaddu_vv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl);
+vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  + return __riscv_vaaddu_vx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  + return __riscv_vaaddu_vx_u64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_vv_u64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_vx_u64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaaddu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vadc.c b/auto-generated/policy_funcs/gnu-api-tests/vadc.c
index dcb8a08ef..3516dbf98 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vadc.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vadc.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i8mf8_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i8mf8_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i8mf8_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i8mf8_tu(vd, vs2, rs1, v0, vl);
 }
-vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i8mf4_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i8mf4_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i8mf4_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i8mf4_tu(vd, vs2, rs1, v0, vl);
 }
-vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i8mf2_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i8mf2_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i8mf2_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i8mf2_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i8m1_tu(maskedoff, op1, op2, carryin, vl);
+vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i8m1_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i8m1_tu(maskedoff, op1, op2, carryin, vl);
+vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i8m1_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i8m2_tu(maskedoff, op1, op2, carryin, vl);
+vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i8m2_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i8m2_tu(maskedoff, op1, op2, carryin, vl);
+vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i8m2_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i8m4_tu(maskedoff, op1, op2, carryin, vl);
+vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i8m4_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i8m4_tu(maskedoff, op1, op2, carryin, vl);
+vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i8m4_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i8m8_tu(maskedoff, op1, op2, carryin, vl);
+vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i8m8_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i8m8_tu(maskedoff, op1, op2, carryin, vl);
+vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i8m8_tu(vd, vs2, rs1, v0, vl);
 }
-vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i16mf4_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i16mf4_tu(vd, vs2, vs1, v0, vl);
 }
-vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i16mf4_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i16mf4_tu(vd, vs2, rs1, v0, vl);
 }
-vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i16mf2_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i16mf2_tu(vd, vs2, vs1, v0, vl);
 }
-vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i16mf2_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i16mf2_tu(vd, vs2, rs1, v0, vl);
 }
-vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i16m1_tu(maskedoff, op1, op2, carryin, vl);
+vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i16m1_tu(vd, vs2, vs1, v0, vl);
 }
-vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i16m1_tu(maskedoff, op1, op2, carryin, vl);
+vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i16m1_tu(vd, vs2, rs1, v0, vl);
 }
-vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i16m2_tu(maskedoff, op1, op2, carryin, vl);
+vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i16m2_tu(vd, vs2, vs1, v0, vl);
 }
-vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i16m2_tu(maskedoff, op1, op2, carryin, vl);
+vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i16m2_tu(vd, vs2, rs1, v0, vl);
 }
-vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i16m4_tu(maskedoff, op1, op2, carryin, vl);
+vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i16m4_tu(vd, vs2, vs1, v0, vl);
 }
-vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i16m4_tu(maskedoff, op1, op2, carryin, vl);
+vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i16m4_tu(vd, vs2, rs1, v0, vl);
 }
-vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i16m8_tu(maskedoff, op1, op2, carryin, vl);
+vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i16m8_tu(vd, vs2, vs1, v0, vl);
 }
-vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i16m8_tu(maskedoff, op1, op2, carryin, vl);
+vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i16m8_tu(vd, vs2, rs1, v0, vl);
 }
-vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i32mf2_tu(maskedoff, op1, op2, carryin, vl);
+vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i32mf2_tu(vd, vs2, vs1, v0, vl);
 }
-vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i32mf2_tu(maskedoff, op1, op2, carryin, vl);
+vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i32mf2_tu(vd, vs2, rs1, v0, vl);
 }
-vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i32m1_tu(maskedoff, op1, op2, carryin, vl);
+vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i32m1_tu(vd, vs2, vs1, v0, vl);
 }
-vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i32m1_tu(maskedoff, op1, op2, carryin, vl);
+vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i32m1_tu(vd, vs2, rs1, v0, vl);
 }
-vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i32m2_tu(maskedoff, op1, op2, carryin, vl);
+vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i32m2_tu(vd, vs2, vs1, v0, vl);
 }
-vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i32m2_tu(maskedoff, op1, op2, carryin, vl);
+vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i32m2_tu(vd, vs2, rs1, v0, vl);
 }
-vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i32m4_tu(maskedoff, op1, op2, carryin, vl);
+vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i32m4_tu(vd, vs2, vs1, v0, vl);
 }
-vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i32m4_tu(maskedoff, op1, op2, carryin, vl);
+vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i32m4_tu(vd, vs2, rs1, v0, vl);
 }
-vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i32m8_tu(maskedoff, op1, op2, carryin, vl);
+vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i32m8_tu(vd, vs2, vs1, v0, vl);
 }
-vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i32m8_tu(maskedoff, op1, op2, carryin, vl);
+vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i32m8_tu(vd, vs2, rs1, v0, vl);
 }
-vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i64m1_tu(maskedoff, op1, op2, carryin, vl);
+vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i64m1_tu(vd, vs2, vs1, v0, vl);
 }
-vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i64m1_tu(maskedoff, op1, op2, carryin, vl);
+vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i64m1_tu(vd, vs2, rs1, v0, vl);
 }
-vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i64m2_tu(maskedoff, op1, op2, carryin, vl);
+vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i64m2_tu(vd, vs2, vs1, v0, vl);
 }
-vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i64m2_tu(maskedoff, op1, op2, carryin, vl);
+vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i64m2_tu(vd, vs2, rs1, v0, vl);
 }
-vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i64m4_tu(maskedoff, op1, op2, carryin, vl);
+vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i64m4_tu(vd, vs2, vs1, v0, vl);
 }
-vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i64m4_tu(maskedoff, op1, op2, carryin, vl);
+vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i64m4_tu(vd, vs2, rs1, v0, vl);
 }
-vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_i64m8_tu(maskedoff, op1, op2, carryin, vl);
+vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vvm_i64m8_tu(vd, vs2, vs1, v0, vl);
 }
-vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_i64m8_tu(maskedoff, op1, op2, carryin, vl);
+vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vxm_i64m8_tu(vd, vs2, rs1, v0, vl);
 }
-vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u8mf8_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u8mf8_tu(vd, vs2, vs1, v0, vl);
 }
-vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u8mf8_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u8mf8_tu(vd, vs2, rs1, v0, vl);
 }
-vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u8mf4_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u8mf4_tu(vd, vs2, vs1, v0, vl);
 }
-vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u8mf4_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u8mf4_tu(vd, vs2, rs1, v0, vl);
 }
-vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u8mf2_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u8mf2_tu(vd, vs2, vs1, v0, vl);
 }
-vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u8mf2_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u8mf2_tu(vd, vs2, rs1, v0, vl);
 }
-vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u8m1_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u8m1_tu(vd, vs2, vs1, v0, vl);
 }
-vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u8m1_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u8m1_tu(vd, vs2, rs1, v0, vl);
 }
-vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u8m2_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u8m2_tu(vd, vs2, vs1, v0, vl);
 }
-vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u8m2_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u8m2_tu(vd, vs2, rs1, v0, vl);
 }
-vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u8m4_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u8m4_tu(vd, vs2, vs1, v0, vl);
 }
-vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u8m4_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u8m4_tu(vd, vs2, rs1, v0, vl);
 }
-vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u8m8_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u8m8_tu(vd, vs2, vs1, v0, vl);
 }
-vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u8m8_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u8m8_tu(vd, vs2, rs1, v0, vl);
 }
-vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u16mf4_tu(maskedoff, op1, op2, carryin, vl);
+vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u16mf4_tu(vd, vs2, vs1, v0, vl);
 }
-vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u16mf4_tu(maskedoff, op1, op2, carryin, vl);
+vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u16mf4_tu(vd, vs2, rs1, v0, vl);
 }
-vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u16mf2_tu(maskedoff, op1, op2, carryin, vl);
+vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u16mf2_tu(vd, vs2, vs1, v0, vl);
 }
-vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u16mf2_tu(maskedoff, op1, op2, carryin, vl);
+vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u16mf2_tu(vd, vs2, rs1, v0, vl);
 }
-vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u16m1_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u16m1_tu(vd, vs2, vs1, v0, vl);
 }
-vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u16m1_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u16m1_tu(vd, vs2, rs1, v0, vl);
 }
-vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u16m2_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u16m2_tu(vd, vs2, vs1, v0, vl);
 }
-vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u16m2_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u16m2_tu(vd, vs2, rs1, v0, vl);
 }
-vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u16m4_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u16m4_tu(vd, vs2, vs1, v0, vl);
 }
-vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u16m4_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u16m4_tu(vd, vs2, rs1, v0, vl);
 }
-vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u16m8_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u16m8_tu(vd, vs2, vs1, v0, vl);
 }
-vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u16m8_tu(maskedoff, op1, op2, carryin, vl);
+vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u16m8_tu(vd, vs2, rs1, v0, vl);
 }
-vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u32mf2_tu(maskedoff, op1, op2, carryin, vl);
+vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u32mf2_tu(vd, vs2, vs1, v0, vl);
 }
-vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u32mf2_tu(maskedoff, op1, op2, carryin, vl);
+vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u32mf2_tu(vd, vs2, rs1, v0, vl);
 }
-vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u32m1_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u32m1_tu(vd, vs2, vs1, v0, vl);
 }
-vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u32m1_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u32m1_tu(vd, vs2, rs1, v0, vl);
 }
-vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u32m2_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u32m2_tu(vd, vs2, vs1, v0, vl);
 }
-vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u32m2_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u32m2_tu(vd, vs2, rs1, v0, vl);
 }
-vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u32m4_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u32m4_tu(vd, vs2, vs1, v0, vl);
 }
-vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u32m4_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u32m4_tu(vd, vs2, rs1, v0, vl);
 }
-vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vvm_u32m8_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vvm_u32m8_tu(vd, vs2, vs1, v0, vl);
 }
-vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_vxm_u32m8_tu(maskedoff, op1, op2, carryin, vl);
+vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_vxm_u32m8_tu(vd, vs2, rs1, v0, vl);
 }
-vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t
carryin, size_t vl) { - return __riscv_vadc_vvm_u64m1_tu(maskedoff, op1, op2, carryin, vl); +vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m1_tu(vd, vs2, vs1, v0, vl); } -vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_vxm_u64m1_tu(maskedoff, op1, op2, carryin, vl); +vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m1_tu(vd, vs2, rs1, v0, vl); } -vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vvm_u64m2_tu(maskedoff, op1, op2, carryin, vl); +vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m2_tu(vd, vs2, vs1, v0, vl); } -vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_vxm_u64m2_tu(maskedoff, op1, op2, carryin, vl); +vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m2_tu(vd, vs2, rs1, v0, vl); } -vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vvm_u64m4_tu(maskedoff, op1, op2, carryin, vl); +vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m4_tu(vd, vs2, vs1, v0, vl); } -vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_vxm_u64m4_tu(maskedoff, op1, op2, carryin, vl); +vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m4_tu(vd, vs2, rs1, v0, vl); } -vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vvm_u64m8_tu(maskedoff, op1, op2, carryin, vl); +vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vvm_u64m8_tu(vd, vs2, vs1, v0, vl); } -vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_vxm_u64m8_tu(maskedoff, op1, op2, carryin, vl); +vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_vxm_u64m8_tu(vd, vs2, rs1, v0, vl); } /* { dg-final { scan-assembler-times {vadc\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vadd.c b/auto-generated/policy_funcs/gnu-api-tests/vadd.c index 22571e874..9fa628c8f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vadd.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vadd.c @@ -6,1412 +6,1412 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf8_tu(vd, vs2, vs1, vl); } 
-vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vadd_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return 
__riscv_vadd_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - 
return __riscv_vadd_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + 
return __riscv_vadd_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) 
{ - return __riscv_vadd_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vadd_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vadd_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return 
__riscv_vadd_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vadd_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vadd_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vadd_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vadd_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vadd_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t 
test_vadd_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vadd_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vadd_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vadd_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vadd_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vadd_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vadd_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, 
uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vadd_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vadd_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vadd_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t 
test_vadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd_vv_i16mf4_tum(mask, 
maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd_vv_i16m8_tum(mask, maskedoff, op1, 
op2, vl); +vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t 
test_vadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t vm, 
vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, 
vuint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vadd_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_vx_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_vx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vadd_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t
maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vadd_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vadd_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vadd_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vadd_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vadd_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vadd\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vand.c b/auto-generated/policy_funcs/gnu-api-tests/vand.c index c1e0c6526..a64a799e0 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vand.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vand.c @@ -6,1412 +6,1412 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; 
-vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vand_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vand_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vand_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vand_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vand_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vand_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + 
return __riscv_vand_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vand_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - 
return __riscv_vand_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + 
return __riscv_vand_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vand_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vand_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vand_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vand_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vand_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vand_vv_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vand_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return 
__riscv_vand_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vand_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vand_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vand_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vand_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vand_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vand_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vand_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vand_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vand_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vand_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vand_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_vx_u8m8_tu(vd, vs2, 
rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vand_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vand_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vand_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vand_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vand_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vand_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vand_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vand_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vand_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vand_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vand_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vand_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t 
test_vand_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vand_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vand_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vand_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vand_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vand_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vand_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vand_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vand_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vand_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vand_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, 
vuint64m1_t op2, size_t vl) { - return __riscv_vand_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vand_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vand_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vand_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vand_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vand_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vand_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vand_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vand_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vand_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t mask, 
vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vand_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vand_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vand_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vand_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t 
test_vand_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vand_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t 
test_vand_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t 
test_vand_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vand_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vand_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vand_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vand_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vand_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vand_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, 
vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf2_tumu(mask,
maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vand_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vand_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vand_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vand_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, 
vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t vm, 
vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vand_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t 
test_vand_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vand_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vand_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vand_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vand_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vand_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vand_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vand_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); 
+vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vand_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vand_vv_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vand_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vand_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vand_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vand_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return 
__riscv_vand_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t 
test_vand_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vand_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t 
vl) {
+  return __riscv_vand_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vand_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vand_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vand_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vand_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vand_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vand_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vand_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vand_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vand_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vand_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vand_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vand_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vand_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_vx_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_vx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_vx_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vand_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vand_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vand\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vasub.c b/auto-generated/policy_funcs/gnu-api-tests/vasub.c
index 691666f5b..7a83b2dc2 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vasub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vasub.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_vx_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vasub_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
  return __riscv_vasub_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vasub_vx_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
  return __riscv_vasub_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
  return __riscv_vasub_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
  return __riscv_vasub_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vasub_vx_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint16mf2_t
test_vasub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vasub_vv_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vasub_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vasub_vv_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vasub_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vasub_vv_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vasub_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vasub_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vasub_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vasub_vv_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, 
vint32mf2_t vs1, size_t vl) { + return __riscv_vasub_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vasub_vv_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vasub_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vasub_vv_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vasub_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vasub_vv_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vasub_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vasub_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vasub_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_vx_i32m8_mu(vm, 
vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_vx_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasub\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vasubu.c b/auto-generated/policy_funcs/gnu-api-tests/vasubu.c
index 9fb418369..b76223ce3 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vasubu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vasubu.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_vx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vasubu_vv_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vasubu_vv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_vx_u8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t
vl) { + return __riscv_vasubu_vx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t vd, 
vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl); +vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return 
__riscv_vasubu_vx_u64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, 
uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m4_tum(mask, maskedoff, op1, 
op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t 
op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t 
test_vasubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + 
return __riscv_vasubu_vv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vasubu_vx_u8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m4_tumu(mask, maskedoff, op1, 
op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t mask, 
vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vasubu_vx_u64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vasubu_vx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vasubu_vx_u8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vasubu_vx_u8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t 
vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vasubu_vx_u16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vasubu_vx_u16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, 
vl); +vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_vx_u32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_vx_u32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m4_mu(mask, maskedoff, 
op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vasubu_vv_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vasubu_vv_u64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_vx_u64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_vx_u64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasubu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vcompress.c b/auto-generated/policy_funcs/gnu-api-tests/vcompress.c index ea4ff85f4..2c5d13d55 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vcompress.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vcompress.c @@ -6,240 +6,240 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_f16mf4_tu(maskedoff, src, mask, vl); +vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_f16mf2_tu(maskedoff, src, mask, vl); +vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m1_tu(maskedoff, src, mask, vl); +vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m2_tu(maskedoff, src, mask, vl); +vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m4_tu(maskedoff, src, mask, vl); +vfloat16m4_t 
test_vcompress_vm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_f16m8_tu(maskedoff, src, mask, vl); +vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_f16m8_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_f32mf2_tu(maskedoff, src, mask, vl); +vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m1_tu(maskedoff, src, mask, vl); +vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m2_tu(maskedoff, src, mask, vl); +vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m4_tu(maskedoff, src, mask, vl); +vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_f32m8_tu(maskedoff, src, mask, vl); +vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_f32m8_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_f64m1_tu(maskedoff, src, mask, vl); +vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_f64m2_tu(maskedoff, src, mask, vl); +vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_f64m4_tu(maskedoff, src, mask, vl); +vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_f64m8_tu(maskedoff, src, mask, vl); +vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_f64m8_tu(vd, 
vs2, vs1, vl); } -vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i8mf8_tu(maskedoff, src, mask, vl); +vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i8mf4_tu(maskedoff, src, mask, vl); +vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i8mf2_tu(maskedoff, src, mask, vl); +vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8mf2_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m1_tu(maskedoff, src, mask, vl); +vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m1_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m2_tu(maskedoff, src, mask, vl); +vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m2_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m4_tu(maskedoff, src, mask, vl); +vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m4_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) { - return __riscv_vcompress_vm_i8m8_tu(maskedoff, src, mask, vl); +vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vcompress_vm_i8m8_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i16mf4_tu(maskedoff, src, mask, vl); +vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i16mf2_tu(maskedoff, src, mask, vl); +vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16mf2_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m1_tu(maskedoff, src, mask, vl); +vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16m1_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m2_tu(maskedoff, src, mask, vl); +vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vbool8_t vs1, size_t vl) 
{ + return __riscv_vcompress_vm_i16m2_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m4_tu(maskedoff, src, mask, vl); +vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16m4_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_i16m8_tu(maskedoff, src, mask, vl); +vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_i16m8_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i32mf2_tu(maskedoff, src, mask, vl); +vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32mf2_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m1_tu(maskedoff, src, mask, vl); +vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m1_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m2_tu(maskedoff, src, mask, vl); +vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m2_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m4_tu(maskedoff, src, mask, vl); +vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m4_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_i32m8_tu(maskedoff, src, mask, vl); +vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_i32m8_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m1_tu(maskedoff, src, mask, vl); +vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m1_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m2_tu(maskedoff, src, mask, vl); +vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m2_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m4_tu(maskedoff, src, mask, vl); +vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m4_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_i64m8_tu(maskedoff, src, mask, vl); +vint64m8_t 
test_vcompress_vm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_i64m8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_u8mf8_tu(maskedoff, src, mask, vl); +vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_u8mf4_tu(maskedoff, src, mask, vl); +vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_u8mf2_tu(maskedoff, src, mask, vl); +vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m1_tu(maskedoff, src, mask, vl); +vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m2_tu(maskedoff, src, mask, vl); +vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m4_tu(maskedoff, src, mask, vl); +vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) { - return __riscv_vcompress_vm_u8m8_tu(maskedoff, src, mask, vl); +vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vbool1_t vs1, size_t vl) { + return __riscv_vcompress_vm_u8m8_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_u16mf4_tu(maskedoff, src, mask, vl); +vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_u16mf2_tu(maskedoff, src, mask, vl); +vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m1_tu(maskedoff, src, mask, vl); +vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t 
src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m2_tu(maskedoff, src, mask, vl); +vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m4_tu(maskedoff, src, mask, vl); +vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) { - return __riscv_vcompress_vm_u16m8_tu(maskedoff, src, mask, vl); +vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vbool2_t vs1, size_t vl) { + return __riscv_vcompress_vm_u16m8_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_u32mf2_tu(maskedoff, src, mask, vl); +vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_u32m1_tu(maskedoff, src, mask, vl); +vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_u32m2_tu(maskedoff, src, mask, vl); +vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vbool16_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_u32m4_tu(maskedoff, src, mask, vl); +vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) { - return __riscv_vcompress_vm_u32m8_tu(maskedoff, src, mask, vl); +vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vbool4_t vs1, size_t vl) { + return __riscv_vcompress_vm_u32m8_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) { - return __riscv_vcompress_vm_u64m1_tu(maskedoff, src, mask, vl); +vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vbool64_t vs1, size_t vl) { + return __riscv_vcompress_vm_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) { - return __riscv_vcompress_vm_u64m2_tu(maskedoff, src, mask, vl); +vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vbool32_t vs1, size_t vl) { + return __riscv_vcompress_vm_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) { - return __riscv_vcompress_vm_u64m4_tu(maskedoff, src, mask, vl); +vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vbool16_t vs1, size_t 
vl) { + return __riscv_vcompress_vm_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) { - return __riscv_vcompress_vm_u64m8_tu(maskedoff, src, mask, vl); +vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vbool8_t vs1, size_t vl) { + return __riscv_vcompress_vm_u64m8_tu(vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcompress\.[ivxfswum.]+\s+} 59 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vdiv.c b/auto-generated/policy_funcs/gnu-api-tests/vdiv.c index 07b579d01..b86d1710f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vdiv.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vdiv.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vdiv_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vdiv_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vdiv_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, 
vint8m2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return 
__riscv_vdiv_vv_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m2_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m2_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m4_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m4_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m8_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m8_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32mf2_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32mf2_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m1_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m1_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m2_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m2_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m4_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m4_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m8_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m8_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m1_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m1_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m2_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m2_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m4_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m4_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m8_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m8_tu(vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m1_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m1_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m2_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m2_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m4_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m4_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m8_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m8_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vdiv_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vdiv_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vdiv_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vdiv_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vdiv_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vdiv\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vdivu.c b/auto-generated/policy_funcs/gnu-api-tests/vdivu.c
index 67de04be3..654fed2ed 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vdivu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vdivu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf2_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m1_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m1_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m2_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m2_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m4_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m4_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m8_tu(vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m8_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16m1_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16m1_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16m2_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16m2_tu(vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16m4_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16m4_tu(vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16m8_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16m8_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u32mf2_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u32m1_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u32m1_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u32m2_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u32m2_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u32m4_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u32m4_tu(vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u32m8_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u32m8_tu(vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u64m1_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u64m1_tu(vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u64m2_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u64m2_tu(vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u64m4_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u64m4_tu(vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u64m8_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u64m8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vdivu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t
test_vdivu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t 
vl) { - return __riscv_vdivu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t 
mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vdivu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + 
return __riscv_vdivu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vdivu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } 
-vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, 
vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, 
vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); 
+vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return 
__riscv_vdivu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vdivu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfabs.c b/auto-generated/policy_funcs/gnu-api-tests/vfabs.c index 598e12595..a3dc70f1b 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfabs.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfabs.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf4_tu(maskedoff, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf2_tu(maskedoff, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs_v_f16m1_tu(maskedoff, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs_v_f16m2_tu(maskedoff, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfabs_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs_v_f16m4_tu(maskedoff, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs_v_f16m8_tu(maskedoff, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f32mf2_tu(maskedoff, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs_v_f32m1_tu(maskedoff, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs_v_f32m2_tu(maskedoff, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs_v_f32m4_tu(maskedoff, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs_v_f32m8_tu(maskedoff, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs_v_f64m1_tu(maskedoff, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs_v_f64m2_tu(maskedoff, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs_v_f64m4_tu(maskedoff, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs_v_f64m8_tu(maskedoff, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf4_tum(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, 
vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf2_tum(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs_v_f16m1_tum(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs_v_f16m2_tum(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs_v_f16m4_tum(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs_v_f16m8_tum(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f32mf2_tum(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs_v_f32m1_tum(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs_v_f32m2_tum(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs_v_f32m4_tum(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs_v_f32m8_tum(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs_v_f64m1_tum(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t mask, 
vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs_v_f64m2_tum(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs_v_f64m4_tum(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs_v_f64m8_tum(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf4_tumu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf2_tumu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs_v_f16m1_tumu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs_v_f16m2_tumu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs_v_f16m4_tumu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs_v_f16m8_tumu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f32mf2_tumu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs_v_f32m1_tumu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m1_tumu(vm, vd, 
vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs_v_f32m2_tumu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs_v_f32m4_tumu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs_v_f32m8_tumu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs_v_f64m1_tumu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs_v_f64m2_tumu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs_v_f64m4_tumu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs_v_f64m8_tumu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf4_mu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f16mf2_mu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs_v_f16m1_mu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs_v_f16m2_mu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfabs_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs_v_f16m4_mu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs_v_f16m8_mu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfabs_v_f32mf2_mu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs_v_f32m1_mu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs_v_f32m2_mu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs_v_f32m4_mu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs_v_f32m8_mu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs_v_f64m1_mu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs_v_f64m2_mu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs_v_f64m4_mu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs_v_f64m8_mu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfabs_v_f64m8_mu(vm, vd, 
vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfabs\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfadd.c b/auto-generated/policy_funcs/gnu-api-tests/vfadd.c index 4bc36fb52..88ed86ab1 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfadd.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfadd.c @@ -6,964 +6,964 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t 
test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfadd_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t 
test_vfadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t 
test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t 
test_vfadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, 
vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); 
+vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_tumu(vm, vd, 
vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_tumu(mask, 
maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_mu(vm, vd, vs2, 
vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t 
test_vfadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, 
vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, 
float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, 
vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t 
vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t 
vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t 
op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, 
vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t 
op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t 
test_vfadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return 
__riscv_vfadd_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, 
vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfadd_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfadd_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfadd_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_rm_mu(mask, maskedoff, 
op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+ return __riscv_vfadd_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
- return __riscv_vfadd_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+ return __riscv_vfadd_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfadd\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfclass.c b/auto-generated/policy_funcs/gnu-api-tests/vfclass.c
index b9a97279b..7a78a2eab 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfclass.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfclass.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
- return __riscv_vfclass_v_u16mf4_tu(maskedoff, op1, vl);
+vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u16mf4_tu(vd, vs2, vl);
 }
-vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
- return __riscv_vfclass_v_u16mf2_tu(maskedoff, op1, vl);
+vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u16mf2_tu(vd, vs2, vl);
 }
-vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
- return __riscv_vfclass_v_u16m1_tu(maskedoff, op1, vl);
+vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u16m1_tu(vd, vs2, vl);
 }
-vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
- return __riscv_vfclass_v_u16m2_tu(maskedoff, op1, vl);
+vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u16m2_tu(vd, vs2, vl);
 }
-vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
- return __riscv_vfclass_v_u16m4_tu(maskedoff, op1, vl);
+vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u16m4_tu(vd, vs2, vl);
 }
-vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
- return __riscv_vfclass_v_u16m8_tu(maskedoff, op1, vl);
+vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u16m8_tu(vd, vs2, vl);
 }
-vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return __riscv_vfclass_v_u32mf2_tu(maskedoff, op1, vl);
+vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u32mf2_tu(vd, vs2, vl);
 }
-vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return __riscv_vfclass_v_u32m1_tu(maskedoff, op1, vl);
+vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u32m1_tu(vd, vs2, vl);
 }
-vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-
return __riscv_vfclass_v_u32m2_tu(maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass_v_u32m4_tu(maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_v_u32m8_tu(maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_v_u64m1_tu(maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_v_u64m2_tu(maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_v_u64m4_tu(maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_v_u64m8_tu(maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass_v_u16mf4_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass_v_u16mf2_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_v_u16m1_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_v_u16m2_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_v_u16m4_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m4_tum(vm, vd, vs2, vl); } 
-vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_v_u16m8_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass_v_u32mf2_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass_v_u32m1_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass_v_u32m2_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass_v_u32m4_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_v_u32m8_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_v_u64m1_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_v_u64m2_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_v_u64m4_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_v_u64m8_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass_v_u16mf4_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, 
size_t vl) { + return __riscv_vfclass_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass_v_u16mf2_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_v_u16m1_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_v_u16m2_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_v_u16m4_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_v_u16m8_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass_v_u32mf2_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass_v_u32m1_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass_v_u32m2_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass_v_u32m4_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_v_u32m8_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_v_u64m1_tumu(mask, 
maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_v_u64m2_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_v_u64m4_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_v_u64m8_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass_v_u16mf4_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass_v_u16mf2_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_v_u16m1_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_v_u16m2_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_v_u16m4_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_v_u16m8_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass_v_u32mf2_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t 
op1, size_t vl) {
- return __riscv_vfclass_v_u32m1_mu(mask, maskedoff, op1, vl);
+vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u32m1_mu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return __riscv_vfclass_v_u32m2_mu(mask, maskedoff, op1, vl);
+vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u32m2_mu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return __riscv_vfclass_v_u32m4_mu(mask, maskedoff, op1, vl);
+vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u32m4_mu(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return __riscv_vfclass_v_u32m8_mu(mask, maskedoff, op1, vl);
+vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u32m8_mu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return __riscv_vfclass_v_u64m1_mu(mask, maskedoff, op1, vl);
+vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u64m1_mu(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return __riscv_vfclass_v_u64m2_mu(mask, maskedoff, op1, vl);
+vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u64m2_mu(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return __riscv_vfclass_v_u64m4_mu(mask, maskedoff, op1, vl);
+vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u64m4_mu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return __riscv_vfclass_v_u64m8_mu(mask, maskedoff, op1, vl);
+vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+ return __riscv_vfclass_v_u64m8_mu(vm, vd, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfclass\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfcvt.c b/auto-generated/policy_funcs/gnu-api-tests/vfcvt.c
index 4eb8405a5..91a39b9fe 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfcvt.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfcvt.c
@@ -6,1924 +6,1924 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return __riscv_vfcvt_x_f_v_i16mf4_tu(maskedoff, src, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfcvt_x_f_v_i16mf4_tu(vd, vs2, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return __riscv_vfcvt_x_f_v_i16mf2_tu(maskedoff, src, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+
return __riscv_vfcvt_x_f_v_i16mf2_tu(vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m1_tu(maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m1_tu(vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m2_tu(maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m2_tu(vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m4_tu(maskedoff, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m4_tu(vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m8_tu(maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf4_tu(maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf2_tu(maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m4_tu(maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m8_tu(maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf4_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf2_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + 
return __riscv_vfcvt_f_x_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m1_tu(maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m2_tu(maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m8_tu(maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf4_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf2_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m1_tu(maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m2_tu(maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m8_tu(maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t 
vl) { + return __riscv_vfcvt_x_f_v_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m4_tu(vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m8_tu(maskedoff, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m8_tu(maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return 
__riscv_vfcvt_f_x_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m8_tu(maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m8_tu(maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m8_tu(vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m1_tu(maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m1_tu(vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m2_tu(maskedoff, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m2_tu(vd, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m4_tu(maskedoff, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m4_tu(vd, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m8_tu(maskedoff, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m1_tu(maskedoff, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m2_tu(maskedoff, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return 
__riscv_vfcvt_xu_f_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m4_tu(maskedoff, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m8_tu(maskedoff, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m1_tu(maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m2_tu(maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m4_tu(maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m8_tu(maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m1_tu(maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m2_tu(maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m4_tu(maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m8_tu(maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m8_tu(vd, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16mf4_tum(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16mf2_tum(mask, maskedoff, src, vl); +vint16mf2_t 
test_vfcvt_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m1_tum(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m2_tum(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m4_tum(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m4_tum(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m8_tum(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m1_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m2_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m4_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m8_tum(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t 
test_vfcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m8_tum(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return 
__riscv_vfcvt_f_xu_v_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m8_tum(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return 
__riscv_vfcvt_xu_f_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m8_tum(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, 
size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m8_tum(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m1_tum(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m2_tum(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { + return 
__riscv_vfcvt_f_x_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m4_tum(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m8_tum(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m1_tum(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m2_tum(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m4_tum(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m8_tum(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m8_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m1_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m2_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) 
{ - return __riscv_vfcvt_x_f_v_i16m4_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m4_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m8_tumu(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t 
test_vfcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m8_tumu(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m8_tumu(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return 
__riscv_vfcvt_x_f_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, 
vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m8_tumu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m8_tumu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); +vint64m2_t 
test_vfcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2_tumu(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4_tumu(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8_tumu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1_tumu(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2_tumu(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4_tumu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m8_tumu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1_tumu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1_tumu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2_tumu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2_tumu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4_tumu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4_tumu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8_tumu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8_tumu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1_tumu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1_tumu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2_tumu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_tumu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4_tumu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4_tumu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8_tumu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8_tumu(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4_mu(mask, maskedoff, src, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4_mu(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2_mu(mask, maskedoff, src, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2_mu(vm, vd, vs2, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1_mu(mask, maskedoff, src, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1_mu(vm, vd, vs2, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2_mu(mask, maskedoff, src, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2_mu(vm, vd, vs2, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4_mu(mask, maskedoff, src, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4_mu(vm, vd, vs2, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8_mu(mask, maskedoff, src, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8_mu(vm, vd, vs2, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf4_mu(vm, vd, vs2, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf2_mu(vm, vd, vs2, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1_mu(mask, maskedoff, src, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m1_mu(vm, vd, vs2, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m2_mu(mask, maskedoff, src, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m2_mu(vm, vd, vs2, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m4_mu(mask, maskedoff, src, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m4_mu(vm, vd, vs2, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m8_mu(mask, maskedoff, src, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m8_mu(vm, vd, vs2, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf4_mu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf4_mu(vm, vd, vs2, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf2_mu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf2_mu(vm, vd, vs2, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m1_mu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m1_mu(vm, vd, vs2, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m2_mu(vm, vd, vs2, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m4_mu(vm, vd, vs2, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m8_mu(vm, vd, vs2, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf4_mu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf4_mu(vm, vd, vs2, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf2_mu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf2_mu(vm, vd, vs2, vl);
 }
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m1_mu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m1_mu(vm, vd, vs2, vl);
 }
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m2_mu(vm, vd, vs2, vl);
 }
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m4_mu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m4_mu(vm, vd, vs2, vl);
 }
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m8_mu(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32mf2_mu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m1_mu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m2_mu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m4_mu(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m8_mu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32mf2_mu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m1_mu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m2_mu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m4_mu(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m8_mu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32mf2_mu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32mf2_mu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m1_mu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m1_mu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m2_mu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m4_mu(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m8_mu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32mf2_mu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32mf2_mu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m1_mu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m1_mu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m2_mu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m4_mu(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m8_mu(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m1_mu(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2_mu(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4_mu(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8_mu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1_mu(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2_mu(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4_mu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m8_mu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1_mu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1_mu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2_mu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4_mu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8_mu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1_mu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1_mu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_mu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4_mu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8_mu(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tu(vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tu(vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tu(vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tu(vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tu(vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tu(vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tu(vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tu(vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tu(vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tu(vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tu(vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tu(vfloat32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tu(vfloat32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tu(vfloat32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tu(vfloat32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tu(vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tu(vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tu(vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tu(vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tu(vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tu(vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tu(vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tu(vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tu(vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tu(vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tu(vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tu(vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f16m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f16m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i32m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u32m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f32m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f32m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i64m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_f_v_u64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u64m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
  return __riscv_vfcvt_xu_f_v_u64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_x_v_f64m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_x_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_xu_v_f64m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_xu_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_f_v_i16m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_f_v_i16m8_rm_tumu(vm, vd, vs2,
__RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); 
} -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t 
test_vfcvt_x_f_v_i32mf2_rm_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tumu(vbool4_t mask, vuint32m8_t maskedoff, 
vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return 
__riscv_vfcvt_f_xu_v_f32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); 
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_mu(vbool64_t vm, 
vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } 
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { 
- return __riscv_vfcvt_f_xu_v_f16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_mu(vbool4_t 
vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m4_rm_mu(vm, vd, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) 
{ - return __riscv_vfcvt_x_f_v_i64m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_mu(vbool64_t 
vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.[ivxfswum.]+\s+} 480 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfcvt_rtz.c b/auto-generated/policy_funcs/gnu-api-tests/vfcvt_rtz.c index 92fe6d5c8..5032d0ba3 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfcvt_rtz.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfcvt_rtz.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf4_tu(maskedoff, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf2_tu(maskedoff, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tu(vd, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m1_tu(maskedoff, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_tu(vd, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m2_tu(maskedoff, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_tu(vd, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m4_tu(maskedoff, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_tu(vd, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m8_tu(maskedoff, src, vl); +vint16m8_t 
test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tu(maskedoff, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tu(maskedoff, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m4_tu(maskedoff, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m8_tu(maskedoff, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_tu(vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_tu(vd, vs2, vl); 
} -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tu(vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_tu(vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_tu(vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_tu(vd, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return 
__riscv_vfcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tu(vd, vs2, vl); } -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf4_tum(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf2_tum(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m1_tum(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m2_tum(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m4_tum(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_tum(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m8_tum(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m1_tum(mask, maskedoff, src, vl); +vuint16m1_t 
test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m2_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m4_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m8_tum(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); +vuint32m1_t 
test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); +vuint64m4_t 
test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m1_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m2_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m4_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m8_tumu(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m1_tumu(mask, maskedoff, 
src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return 
__riscv_vfcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - 
return __riscv_vfcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf4_mu(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf2_mu(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m1_mu(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m2_mu(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m4_mu(mask, maskedoff, src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_mu(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m8_mu(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return 
__riscv_vfcvt_rtz_xu_f_v_u16m1_mu(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m2_mu(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m4_mu(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m8_mu(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); 
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t 
vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m4_mu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_f_v_u64m8_mu(vm, vd, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.rtz[ivxfswum.]*\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfdiv.c b/auto-generated/policy_funcs/gnu-api-tests/vfdiv.c
index 72b3c7810..8712829fd 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfdiv.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfdiv.c
@@ -6,964 +6,964 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf4_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf4_tu(vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf4_tu(vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16mf2_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16mf2_tu(vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m1_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m1_tu(vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m1_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f16m1_tu(vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f16m2_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f16m2_tu(vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f16m2_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1,
size_t vl) { + return __riscv_vfdiv_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t 
test_vfdiv_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return 
__riscv_vfdiv_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return 
__riscv_vfdiv_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return 
__riscv_vfdiv_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_tum(vm, vd, vs2, 
rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfdiv_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, 
size_t vl) { + return __riscv_vfdiv_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, 
size_t vl) { - return __riscv_vfdiv_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, 
size_t vl) { + return __riscv_vfdiv_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_mu(mask, 
maskedoff, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t mask, 
vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t 
test_vfdiv_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } 
-vfloat32m1_t test_vfdiv_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); 
+vfloat64m1_t test_vfdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tum(vbool32_t vm, 
vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_tum(vbool2_t mask, 
vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t 
test_vfdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t 
test_vfdiv_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return 
__riscv_vfdiv_vv_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, 
vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, 
vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t 
test_vfdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_mu(vbool2_t 
mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t 
vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfdiv\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmacc.c b/auto-generated/policy_funcs/gnu-api-tests/vfmacc.c
index ec1057040..b39395b48 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfmacc.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfmacc.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_
   return __riscv_vfmacc_vf_f64m8_tu(vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf4_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf4_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m1_tum(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m1_tum(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m2_tum(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m2_tum(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m4_tum(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m4_tum(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m8_tum(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m1_tum(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m2_tum(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m4_tum(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m8_tum(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m1_tum(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m2_tum(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m4_tum(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m8_tum(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf4_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf4_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf2_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m1_mu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m1_mu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m1_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m2_mu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m2_mu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m4_mu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m4_mu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m8_mu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m8_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32mf2_mu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m1_mu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m1_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m2_mu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m4_mu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m8_mu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m8_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m1_mu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m1_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m2_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m4_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m8_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m8_mu(vm, vd, rs1, vs2, vl);
 }
 vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64
   return __riscv_vfmacc_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmacc_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmacc_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmacc_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmacc_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmacc_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmacc_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmacc_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmacc_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmacc_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmacc_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmacc_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmacc_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmacc_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmacc_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmacc_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmacc_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmacc_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmacc_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmacc_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmacc_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmacc_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmacc_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmacc_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmacc_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmacc_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t
test_vfmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return 
__riscv_vfmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmacc_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmacc\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmadd.c b/auto-generated/policy_funcs/gnu-api-tests/vfmadd.c
index 871a3c2cf..a00444771 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfmadd.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfmadd.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_
   return __riscv_vfmadd_vf_f64m8_tu(vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f16mf4_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_f16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f16mf4_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_f16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f16mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_f16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f16mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_f16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f16m1_tum(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_f16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f16m1_tum(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_f16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f16m2_tum(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t
vl) { + return __riscv_vfmadd_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t 
test_vfmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m2_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return 
__riscv_vfmadd_vv_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m4_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m8_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } 
-vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, 
float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m2_tumu(mask, 
vd, rs1, vs2, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m1_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t 
vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m1_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m2_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t 
test_vfmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } 
-vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f64m2_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_f64m2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f64m4_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_f64m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f64m4_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_f64m4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_f64m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f64m8_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_f64m8_mu(vm, vd, rs1, vs2, vl);
 }
 vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64
   return __riscv_vfmadd_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmadd_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t
test_vfmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, 
size_t vl) { - return __riscv_vfmadd_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m8_rm_tum(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, 
float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, 
vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return 
__riscv_vfmadd_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, 
vl); +vfloat64m2_t test_vfmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_mu(vbool16_t 
mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t 
vs2, size_t vl) { + return __riscv_vfmadd_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmadd\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmax.c 
b/auto-generated/policy_funcs/gnu-api-tests/vfmax.c index 29ad3010e..ae1fb5307 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfmax.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfmax.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { 
+ return __riscv_vfmax_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t 
test_vfmax_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, 
vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t 
vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, 
vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return 
__riscv_vfmax_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, 
vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t 
vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t mask, 
vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t vm, 
vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, 
size_t vl) { - return __riscv_vfmax_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } 
-vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmax\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmerge.c b/auto-generated/policy_funcs/gnu-api-tests/vfmerge.c index e9ea67f3b..3cbe7f92b 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfmerge.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfmerge.c @@ -6,64 +6,64 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16mf4_tu(maskedoff, op1, op2, mask, vl); +vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16mf4_tu(vd, vs2, rs1, v0, vl); } -vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16mf2_tu(maskedoff, op1, op2, mask, vl); +vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16mf2_tu(vd, vs2, rs1, v0, vl); } -vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m1_tu(maskedoff, op1, op2, mask, vl); +vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m1_tu(vd, vs2, rs1, v0, vl); } -vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m2_tu(maskedoff, op1, op2, mask, vl); +vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m2_tu(vd, vs2, rs1, v0, vl); } -vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m4_tu(maskedoff, op1, op2, mask, vl); +vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m4_tu(vd, vs2, rs1, v0, vl); } -vfloat16m8_t 
test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f16m8_tu(maskedoff, op1, op2, mask, vl); +vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f16m8_tu(vd, vs2, rs1, v0, vl); } -vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32mf2_tu(maskedoff, op1, op2, mask, vl); +vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32mf2_tu(vd, vs2, rs1, v0, vl); } -vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m1_tu(maskedoff, op1, op2, mask, vl); +vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m1_tu(vd, vs2, rs1, v0, vl); } -vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m2_tu(maskedoff, op1, op2, mask, vl); +vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m2_tu(vd, vs2, rs1, v0, vl); } -vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m4_tu(maskedoff, op1, op2, mask, vl); +vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m4_tu(vd, vs2, rs1, v0, vl); } -vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, vbool4_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f32m8_tu(maskedoff, op1, op2, mask, vl); +vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f32m8_tu(vd, vs2, rs1, v0, vl); } -vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, vbool64_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m1_tu(maskedoff, op1, op2, mask, vl); +vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m1_tu(vd, vs2, rs1, v0, vl); } -vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, vbool32_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m2_tu(maskedoff, op1, op2, mask, vl); +vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m2_tu(vd, vs2, rs1, v0, vl); } -vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, vbool16_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m4_tu(maskedoff, op1, op2, mask, vl); +vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m4_tu(vd, vs2, rs1, v0, vl); } -vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, vbool8_t mask, size_t vl) { - return __riscv_vfmerge_vfm_f64m8_tu(maskedoff, op1, 
op2, mask, vl); +vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vfmerge_vfm_f64m8_tu(vd, vs2, rs1, v0, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmerge\.[ivxfswum.]+\s+} 15 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmin.c b/auto-generated/policy_funcs/gnu-api-tests/vfmin.c index 26cf5c0c7..4b02f04db 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfmin.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfmin.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t 
test_vfmin_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfmin_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t 
maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t vm, 
vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t 
maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, 
vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t mask, 
vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t 
test_vfmin_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t 
test_vfmin_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); 
+vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t 
maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return 
__riscv_vfmin_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmin\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmsac.c b/auto-generated/policy_funcs/gnu-api-tests/vfmsac.c index f05e785e8..e9addbab8 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfmsac.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfmsac.c @@ -126,364 +126,364 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmsac_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfmsac_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m8_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m8_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t vm, 
vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m1_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m2_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m4_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m8_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m1_tum(mask, vd, rs1, vs2, vl); 
+vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m2_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m4_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m8_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, 
float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return 
__riscv_vfmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t 
test_vfmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, 
vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m8_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m8_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t mask, 
vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m1_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m2_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m4_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m8_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t 
test_vfmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m1_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m2_mu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m4_mu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m8_mu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -606,364 +606,364 @@ vfloat64m8_t test_vfmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64 return __riscv_vfmsac_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t 
vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return 
__riscv_vfmsac_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t 
test_vfmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_tumu(vbool32_t mask, 
vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + 
return __riscv_vfmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, 
vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsac_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsac\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmsub.c b/auto-generated/policy_funcs/gnu-api-tests/vfmsub.c
index e2426f197..49250fe7e 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfmsub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfmsub.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_
   return __riscv_vfmsub_vf_f64m8_tu(vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf4_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf4_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m1_tum(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m1_tum(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m2_tum(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m2_tum(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m4_tum(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m4_tum(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m8_tum(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m8_tum(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m1_tum(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m2_tum(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m4_tum(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m8_tum(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m1_tum(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m2_tum(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m2_tum(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m4_tum(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m4_tum(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m8_tum(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m8_tum(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf4_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf4_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf2_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m1_mu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m1_mu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m1_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m2_mu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m2_mu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m4_mu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m4_mu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m8_mu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m8_mu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m8_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32mf2_mu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m1_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m1_mu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
  +  return __riscv_vfmsub_vf_f32m1_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m2_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m2_mu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m4_mu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m4_mu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m8_mu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m8_mu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m8_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m1_mu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m1_mu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m1_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m2_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m2_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m2_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m4_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m4_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m4_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m8_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m8_mu(vm, vd, rs1, vs2, vl);
 }
 vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64
   return __riscv_vfmsub_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_f16m8_rm_mu(vm, vd, rs1, vs2,
__RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t 
vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t 
test_vfmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsub\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmul.c b/auto-generated/policy_funcs/gnu-api-tests/vfmul.c index 25d4c093e..4a85b1ebb 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfmul.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfmul.c @@ -6,964 +6,964 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t 
test_vfmul_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfmul_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t 
maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t vm, 
vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t 
maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, 
vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t mask, 
vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t 
test_vfmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t 
test_vfmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); 
+vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t 
maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmul_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfmul_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmul_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmul_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmul_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmul_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmul_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmul_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmul_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmul_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmul_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmul_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmul_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmul_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmul_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmul_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmul_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmul_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmul_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmul_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmul_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmul_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmul_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmul_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmul_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmul_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmul_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmul\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfmv.c b/auto-generated/policy_funcs/gnu-api-tests/vfmv.c
index f4f13cf7a..42cddfdf8 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfmv.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfmv.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16mf4_tu(maskedoff, src, vl);
+vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16mf4_tu(vd, rs1, vl);
 }
-vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16mf2_tu(maskedoff, src, vl);
+vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16mf2_tu(vd, rs1, vl);
 }
-vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m1_tu(maskedoff, src, vl);
+vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m1_tu(vd, rs1, vl);
 }
-vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m2_tu(maskedoff, src, vl);
+vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m2_tu(vd, rs1, vl);
 }
-vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m4_tu(maskedoff, src, vl);
+vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m4_tu(vd, rs1, vl);
 }
-vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f16m8_tu(maskedoff, src, vl);
+vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f16m8_tu(vd, rs1, vl);
 }
-vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32mf2_tu(maskedoff, src, vl);
+vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32mf2_tu(vd, rs1, vl);
 }
-vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m1_tu(maskedoff, src, vl);
+vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m1_tu(vd, rs1, vl);
 }
-vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m2_tu(maskedoff, src, vl);
+vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m2_tu(vd, rs1, vl);
 }
-vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m4_tu(maskedoff, src, vl);
+vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m4_tu(vd, rs1, vl);
 }
-vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f32m8_tu(maskedoff, src, vl);
+vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f32m8_tu(vd, rs1, vl);
 }
-vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m1_tu(maskedoff, src, vl);
+vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m1_tu(vd, rs1, vl);
 }
-vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m2_tu(maskedoff, src, vl);
+vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m2_tu(vd, rs1, vl);
 }
-vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m4_tu(maskedoff, src, vl);
+vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m4_tu(vd, rs1, vl);
 }
-vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_v_f_f64m8_tu(maskedoff, src, vl);
+vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_v_f_f64m8_tu(vd, rs1, vl);
 }
-vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16mf4_tu(maskedoff, src, vl);
+vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16mf4_tu(vd, rs1, vl);
 }
-vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16mf2_tu(maskedoff, src, vl);
+vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16mf2_tu(vd, rs1, vl);
 }
-vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m1_tu(maskedoff, src, vl);
+vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m1_tu(vd, rs1, vl);
 }
-vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m2_tu(maskedoff, src, vl);
+vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m2_tu(vd, rs1, vl);
 }
-vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m4_tu(maskedoff, src, vl);
+vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m4_tu(vd, rs1, vl);
 }
-vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t maskedoff, float16_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f16m8_tu(maskedoff, src, vl);
+vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t vd, float16_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f16m8_tu(vd, rs1, vl);
 }
-vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32mf2_tu(maskedoff, src, vl);
+vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32mf2_tu(vd, rs1, vl);
 }
-vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m1_tu(maskedoff, src, vl);
+vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m1_tu(vd, rs1, vl);
 }
-vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m2_tu(maskedoff, src, vl);
+vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m2_tu(vd, rs1, vl);
 }
-vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m4_tu(maskedoff, src, vl);
+vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m4_tu(vd, rs1, vl);
 }
-vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t maskedoff, float32_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f32m8_tu(maskedoff, src, vl);
+vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t vd, float32_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f32m8_tu(vd, rs1, vl);
 }
-vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m1_tu(maskedoff, src, vl);
+vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f64m1_tu(vd, rs1, vl);
 }
-vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m2_tu(maskedoff, src, vl);
+vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f64m2_tu(vd, rs1, vl);
 }
-vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m4_tu(maskedoff, src, vl);
+vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f64m4_tu(vd, rs1, vl);
 }
-vfloat64m8_t test_vfmv_s_f_f64m8_tu(vfloat64m8_t maskedoff, float64_t src, size_t vl) {
-  return __riscv_vfmv_s_f_f64m8_tu(maskedoff, src, vl);
+vfloat64m8_t test_vfmv_s_f_f64m8_tu(vfloat64m8_t vd, float64_t rs1, size_t vl) {
+  return __riscv_vfmv_s_f_f64m8_tu(vd, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vfmv\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfncvt.c b/auto-generated/policy_funcs/gnu-api-tests/vfncvt.c
index f75c19a4e..a93c74c93 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfncvt.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfncvt.c
@@ -6,1828 +6,1828 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf8_tu(maskedoff, src, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf8_tu(vd, vs2, vl);
 }
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf4_tu(maskedoff, src, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf4_tu(vd, vs2, vl);
 }
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8mf2_tu(maskedoff, src, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8mf2_tu(vd, vs2, vl);
 }
-vint8m1_t test_vfncvt_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m1_tu(maskedoff, src, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m1_tu(vd, vs2, vl);
 }
-vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m2_tu(maskedoff, src, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m2_tu(vd, vs2, vl);
 }
-vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i8m4_tu(maskedoff, src, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i8m4_tu(vd, vs2, vl);
 }
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf8_tu(maskedoff, src, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf8_tu(vd, vs2, vl);
 }
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf4_tu(maskedoff, src, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf4_tu(vd, vs2, vl);
 }
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8mf2_tu(maskedoff, src, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8mf2_tu(vd, vs2, vl);
 }
-vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m1_tu(maskedoff, src, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m1_tu(vd, vs2, vl);
 }
-vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m2_tu(maskedoff, src, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m2_tu(vd, vs2, vl);
 }
-vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u8m4_tu(maskedoff, src, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u8m4_tu(vd, vs2, vl);
 }
-vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf4_tu(maskedoff, src, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf4_tu(vd, vs2, vl);
 }
-vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16mf2_tu(maskedoff, src, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16mf2_tu(vd, vs2, vl);
 }
-vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m1_tu(maskedoff, src, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m1_tu(vd, vs2, vl);
 }
-vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m2_tu(maskedoff, src, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m2_tu(vd, vs2, vl);
 }
-vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_f_w_i16m4_tu(maskedoff, src, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_f_w_i16m4_tu(vd, vs2, vl);
 }
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf4_tu(maskedoff, src, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf4_tu(vd, vs2, vl);
 }
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16mf2_tu(maskedoff, src, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16mf2_tu(vd, vs2, vl);
 }
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m1_tu(maskedoff, src, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m1_tu(vd, vs2, vl);
 }
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m2_tu(maskedoff, src, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m2_tu(vd, vs2, vl);
 }
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_f_w_u16m4_tu(maskedoff, src, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_f_w_u16m4_tu(vd, vs2, vl);
 }
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf4_tu(maskedoff, src, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf4_tu(vd, vs2, vl);
 }
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16mf2_tu(maskedoff, src, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16mf2_tu(vd, vs2, vl);
 }
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m1_tu(maskedoff, src, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_x_w_f16m1_tu(vd, vs2, vl);
 }
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_x_w_f16m2_tu(maskedoff, src, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t
vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_tu(maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_tu(maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_tu(maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_tu(maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_tu(vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return 
__riscv_vfncvt_x_f_w_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_tu(vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t maskedoff, vuint64m1_t 
src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_tu(vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf8_tum(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf4_tum(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf4_tum(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf2_tum(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_tum(mask, maskedoff, src, vl); +vint8m1_t 
test_vfncvt_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_tum(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_tum(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_tum(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_tum(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_tum(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_tum(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_tum(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t 
test_vfncvt_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_tum(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_tum(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_tum(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return 
__riscv_vfncvt_f_x_w_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t 
test_vfncvt_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_tum(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_tum(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_tum(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_tum(vm, vd, 
vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, 
vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_tum(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf8_tumu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf4_tumu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf2_tumu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_tumu(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_tumu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_tumu(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_tumu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_tumu(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t 
vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_tumu(vm, vd, vs2, vl); } 
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t mask, 
vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return 
__riscv_vfncvt_x_f_w_i32m2_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t 
test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_tumu(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf8_mu(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf8_mu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf4_mu(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf4_mu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf2_mu(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfncvt_x_f_w_i8mf2_mu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_mu(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_mu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_mu(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_mu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_mu(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_mu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_mu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_mu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_mu(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_mu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_mu(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_mu(mask, maskedoff, src, vl); 
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_mu(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_mu(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_mu(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_mu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_mu(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_mu(vm, vd, vs2, 
vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return 
__riscv_vfncvt_f_f_w_f16m1_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_mu(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_mu(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_mu(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return 
__riscv_vfncvt_xu_f_w_u32m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, 
vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_mu(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tu(vuint8mf2_t vd, vfloat16m1_t 
vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t 
test_vfncvt_xu_f_w_u16m1_rm_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tu(vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tu(vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tu(vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tu(vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tu(vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tu(vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tu(vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tu(vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tu(vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tu(vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tu(vfloat16m1_t maskedoff, vuint32m2_t src, 
size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tu(vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tu(vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tu(vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tu(vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tu(vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_rm_tu(maskedoff, src, 
__RISCV_FRM_RNE, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tu(vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tu(vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tu(vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tu(vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tu(vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tu(vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tu(vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tu(vfloat32mf2_t vd, vuint64m1_t 
vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tu(vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tu(vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tu(vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tu(vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tu(vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tu(vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tum(vbool16_t vm, 
vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); 
} -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t 
test_vfncvt_xu_f_w_u16m4_rm_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t 
test_vfncvt_f_xu_w_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tum(vbool16_t 
mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { 
- return __riscv_vfncvt_f_x_w_f32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return 
__riscv_vfncvt_x_f_w_i8mf8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); 
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t 
test_vfncvt_xu_f_w_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t 
test_vfncvt_f_xu_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_rm_tumu(mask, maskedoff, src, 
__RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); 
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, 
vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfncvt_xu_f_w_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_mu(vbool4_t 
mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return 
__riscv_vfncvt_f_x_w_f16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_rm_mu(mask, 
maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_mu(vbool16_t vm, 
vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vfncvt_f_xu_w_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_f_w_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.[ivxfswum.]+\s+} 456 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rod.c b/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rod.c
index c921c08da..deb3dc8a3 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rod.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rod.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16mf4_tu(maskedoff, src, vl);
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_f_w_f16mf4_tu(vd, vs2, vl);
 }
-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16mf2_tu(maskedoff, src, vl);
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_f_w_f16mf2_tu(vd, vs2, vl);
 }
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m1_tu(maskedoff, src, vl);
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_f_w_f16m1_tu(vd, vs2, vl);
 }
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m2_tu(maskedoff, src, vl);
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_f_w_f16m2_tu(vd, vs2, vl);
 }
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return
__riscv_vfncvt_rod_f_f_w_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t vm, 
vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return 
__riscv_vfncvt_rod_f_f_w_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m1_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m2_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m4_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m1_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t 
maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m2_mu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_f_w_f32m2_mu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m4_mu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_f_w_f32m4_mu(vm, vd, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rod[ivxfswum.]*\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rtz.c b/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rtz.c
index 1d2a3632e..a59c73328 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rtz.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfncvt_rtz.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf8_tu(maskedoff, src, vl);
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf8_tu(vd, vs2, vl);
 }
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf4_tu(maskedoff, src, vl);
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf4_tu(vd, vs2, vl);
 }
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf2_tu(maskedoff, src, vl);
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf2_tu(vd, vs2, vl);
 }
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m1_tu(maskedoff, src, vl);
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m1_tu(vd, vs2, vl);
 }
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m2_tu(maskedoff, src, vl);
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m2_tu(vd, vs2, vl);
 }
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m4_tu(maskedoff, src, vl);
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m4_tu(vd, vs2, vl);
 }
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tu(maskedoff, src, vl);
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tu(vd, vs2, vl);
 }
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tu(maskedoff, src, vl);
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t vd,
vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tu(maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tu(vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m1_tu(maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1_tu(vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m2_tu(maskedoff, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2_tu(vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m4_tu(maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4_tu(vd, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf4_tu(maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf2_tu(maskedoff, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2_tu(vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m1_tu(maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1_tu(vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m2_tu(maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2_tu(vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m4_tu(maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4_tu(vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tu(maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tu(maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t 
test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m4_tu(maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4_tu(vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4_tu(vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4_tu(vd, vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t 
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf8_tum(mask, maskedoff, src, vl);
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf8_tum(vm, vd, vs2, vl);
 }
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf4_tum(mask, maskedoff, src, vl);
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf4_tum(vm, vd, vs2, vl);
 }
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf2_tum(mask, maskedoff, src, vl);
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf2_tum(vm, vd, vs2, vl);
 }
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m1_tum(mask, maskedoff, src, vl);
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m1_tum(vm, vd, vs2, vl);
 }
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m2_tum(mask, maskedoff, src, vl);
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m2_tum(vm, vd, vs2, vl);
 }
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m4_tum(mask, maskedoff, src, vl);
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m4_tum(vm, vd, vs2, vl);
 }
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl);
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tum(vm, vd, vs2, vl);
 }
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl);
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tum(vm, vd, vs2, vl);
 }
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl);
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tum(vm, vd, vs2, vl);
 }
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m1_tum(mask, maskedoff, src, vl);
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m1_tum(vm, vd, vs2, vl);
 }
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m2_tum(mask, maskedoff, src, vl);
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m2_tum(vm, vd, vs2, vl);
 }
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m4_tum(mask, maskedoff, src, vl);
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m4_tum(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf4_tum(mask, maskedoff, src, vl);
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16mf4_tum(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf2_tum(mask, maskedoff, src, vl);
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16mf2_tum(vm, vd, vs2, vl);
 }
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m1_tum(mask, maskedoff, src, vl);
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m1_tum(vm, vd, vs2, vl);
 }
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m2_tum(mask, maskedoff, src, vl);
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m2_tum(vm, vd, vs2, vl);
 }
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m4_tum(mask, maskedoff, src, vl);
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m4_tum(vm, vd, vs2, vl);
 }
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tum(vm, vd, vs2, vl);
 }
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tum(vm, vd, vs2, vl);
 }
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m1_tum(mask, maskedoff, src, vl);
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m1_tum(vm, vd, vs2, vl);
 }
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m2_tum(mask, maskedoff, src, vl);
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m2_tum(vm, vd, vs2, vl);
 }
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m4_tum(mask, maskedoff, src, vl);
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m4_tum(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32mf2_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32mf2_tum(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m1_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m1_tum(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m2_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m2_tum(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m4_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m4_tum(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tum(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m1_tum(mask, maskedoff, src, vl);
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m1_tum(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m2_tum(mask, maskedoff, src, vl);
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m2_tum(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m4_tum(mask, maskedoff, src, vl);
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m4_tum(vm, vd, vs2, vl);
 }
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl);
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf8_tumu(vm, vd, vs2, vl);
 }
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl);
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf4_tumu(vm, vd, vs2, vl);
 }
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl);
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf2_tumu(vm, vd, vs2, vl);
 }
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m1_tumu(mask, maskedoff, src, vl);
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m1_tumu(vm, vd, vs2, vl);
 }
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m2_tumu(mask, maskedoff, src, vl);
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m2_tumu(vm, vd, vs2, vl);
 }
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m4_tumu(mask, maskedoff, src, vl);
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m4_tumu(vm, vd, vs2, vl);
 }
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl);
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tumu(vm, vd, vs2, vl);
 }
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl);
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tumu(vm, vd, vs2, vl);
 }
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl);
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tumu(vm, vd, vs2, vl);
 }
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl);
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m1_tumu(vm, vd, vs2, vl);
 }
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl);
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m2_tumu(vm, vd, vs2, vl);
 }
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl);
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m4_tumu(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl);
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16mf4_tumu(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl);
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16mf2_tumu(vm, vd, vs2, vl);
 }
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m1_tumu(mask, maskedoff, src, vl);
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m1_tumu(vm, vd, vs2, vl);
 }
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m2_tumu(mask, maskedoff, src, vl);
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m2_tumu(vm, vd, vs2, vl);
 }
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m4_tumu(mask, maskedoff, src, vl);
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m4_tumu(vm, vd, vs2, vl);
 }
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tumu(vm, vd, vs2, vl);
 }
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tumu(vm, vd, vs2, vl);
 }
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl);
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m1_tumu(vm, vd, vs2, vl);
 }
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl);
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m2_tumu(vm, vd, vs2, vl);
 }
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl);
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m4_tumu(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32mf2_tumu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m1_tumu(mask, maskedoff, src, vl);
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m1_tumu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m2_tumu(mask, maskedoff, src, vl);
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m2_tumu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m4_tumu(mask, maskedoff, src, vl);
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m4_tumu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tumu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m1_tumu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m2_tumu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m4_tumu(vm, vd, vs2, vl);
 }
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf8_mu(mask, maskedoff, src, vl);
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf8_mu(vm, vd, vs2, vl);
 }
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf4_mu(mask, maskedoff, src, vl);
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf4_mu(vm, vd, vs2, vl);
 }
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf2_mu(mask, maskedoff, src, vl);
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8mf2_mu(vm, vd, vs2, vl);
 }
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m1_mu(mask, maskedoff, src, vl);
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m1_mu(vm, vd, vs2, vl);
 }
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m2_mu(mask, maskedoff, src, vl);
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m2_mu(vm, vd, vs2, vl);
 }
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m4_mu(mask, maskedoff, src, vl);
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i8m4_mu(vm, vd, vs2, vl);
 }
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl);
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_mu(vm, vd, vs2, vl);
 }
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl);
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_mu(vm, vd, vs2, vl);
 }
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl);
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_mu(vm, vd, vs2, vl);
 }
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m1_mu(mask, maskedoff, src, vl);
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m1_mu(vm, vd, vs2, vl);
 }
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m2_mu(mask, maskedoff, src, vl);
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m2_mu(vm, vd, vs2, vl);
 }
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m4_mu(mask, maskedoff, src, vl);
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u8m4_mu(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf4_mu(mask, maskedoff, src, vl);
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16mf4_mu(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf2_mu(mask, maskedoff, src, vl);
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16mf2_mu(vm, vd, vs2, vl);
 }
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m1_mu(mask, maskedoff, src, vl);
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m1_mu(vm, vd, vs2, vl);
 }
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m2_mu(mask, maskedoff, src, vl);
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m2_mu(vm, vd, vs2, vl);
 }
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m4_mu(mask, maskedoff, src, vl);
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i16m4_mu(vm, vd, vs2, vl);
 }
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_mu(vm, vd, vs2, vl);
 }
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_mu(vm, vd, vs2, vl);
 }
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m1_mu(mask, maskedoff, src, vl);
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m1_mu(vm, vd, vs2, vl);
 }
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m2_mu(mask, maskedoff, src, vl);
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m2_mu(vm, vd, vs2, vl);
 }
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m4_mu(mask, maskedoff, src, vl);
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u16m4_mu(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32mf2_mu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32mf2_mu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m1_mu(mask, maskedoff, src, vl);
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m1_mu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m2_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_i32m2_mu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m4_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
  return __riscv_vfncvt_rtz_x_f_w_i32m4_mu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_mu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m1_mu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m1_mu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m2_mu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m2_mu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m4_mu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_u32m4_mu(vm, vd, vs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rtz[ivxfswum.]*\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfneg.c b/auto-generated/policy_funcs/gnu-api-tests/vfneg.c
index 05b7cf338..78296a3ec 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfneg.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfneg.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf4_tu(maskedoff, op1, vl);
+vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf4_tu(vd, vs, vl);
 }
-vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf2_tu(maskedoff, op1, vl);
+vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf2_tu(vd, vs, vl);
 }
-vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m1_tu(maskedoff, op1, vl);
+vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m1_tu(vd, vs, vl);
 }
-vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m2_tu(maskedoff, op1, vl);
+vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m2_tu(vd, vs, vl);
 }
-vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m4_tu(maskedoff, op1, vl);
+vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m4_tu(vd, vs, vl);
 }
-vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m8_tu(maskedoff, op1, vl);
+vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m8_tu(vd, vs, vl);
 }
-vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32mf2_tu(maskedoff, op1, vl);
+vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32mf2_tu(vd, vs, vl);
 }
-vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m1_tu(maskedoff, op1, vl);
+vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m1_tu(vd, vs, vl);
 }
-vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m2_tu(maskedoff, op1, vl);
+vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m2_tu(vd, vs, vl);
 }
-vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m4_tu(maskedoff, op1, vl);
+vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m4_tu(vd, vs, vl);
 }
-vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m8_tu(maskedoff, op1, vl);
+vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m8_tu(vd, vs, vl);
 }
-vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m1_tu(maskedoff, op1, vl);
+vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m1_tu(vd, vs, vl);
 }
-vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m2_tu(maskedoff, op1, vl);
+vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m2_tu(vd, vs, vl);
 }
-vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m4_tu(maskedoff, op1, vl);
+vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m4_tu(vd, vs, vl);
 }
-vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m8_tu(maskedoff, op1, vl);
+vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m8_tu(vd, vs, vl);
 }
-vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf4_tum(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf4_tum(vm, vd, vs, vl);
 }
-vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf2_tum(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf2_tum(vm, vd, vs, vl);
 }
-vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m1_tum(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m1_tum(vm, vd, vs, vl);
 }
-vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m2_tum(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m2_tum(vm, vd, vs, vl);
 }
-vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m4_tum(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m4_tum(vm, vd, vs, vl);
 }
-vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m8_tum(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m8_tum(vm, vd, vs, vl);
 }
-vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32mf2_tum(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32mf2_tum(vm, vd, vs, vl);
 }
-vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m1_tum(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m1_tum(vm, vd, vs, vl);
 }
-vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m2_tum(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m2_tum(vm, vd, vs, vl);
 }
-vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m4_tum(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m4_tum(vm, vd, vs, vl);
 }
-vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m8_tum(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m8_tum(vm, vd, vs, vl);
 }
-vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m1_tum(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m1_tum(vm, vd, vs, vl);
 }
-vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m2_tum(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m2_tum(vm, vd, vs, vl);
 }
-vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m4_tum(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m4_tum(vm, vd, vs, vl);
 }
-vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m8_tum(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m8_tum(vm, vd, vs, vl);
 }
-vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf4_tumu(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf4_tumu(vm, vd, vs, vl);
 }
-vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf2_tumu(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf2_tumu(vm, vd, vs, vl);
 }
-vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m1_tumu(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m1_tumu(vm, vd, vs, vl);
 }
-vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m2_tumu(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m2_tumu(vm, vd, vs, vl);
 }
-vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m4_tumu(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m4_tumu(vm, vd, vs, vl);
 }
-vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m8_tumu(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m8_tumu(vm, vd, vs, vl);
 }
-vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32mf2_tumu(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32mf2_tumu(vm, vd, vs, vl);
 }
-vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m1_tumu(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m1_tumu(vm, vd, vs, vl);
 }
-vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m2_tumu(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m2_tumu(vm, vd, vs, vl);
 }
-vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m4_tumu(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m4_tumu(vm, vd, vs, vl);
 }
-vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m8_tumu(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m8_tumu(vm, vd, vs, vl);
 }
-vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m1_tumu(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m1_tumu(vm, vd, vs, vl);
 }
-vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m2_tumu(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m2_tumu(vm, vd, vs, vl);
 }
-vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m4_tumu(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m4_tumu(vm, vd, vs, vl);
 }
-vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m8_tumu(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m8_tumu(vm, vd, vs, vl);
 }
-vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf4_mu(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf4_mu(vm, vd, vs, vl);
 }
-vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16mf2_mu(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16mf2_mu(vm, vd, vs, vl);
 }
-vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m1_mu(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m1_mu(vm, vd, vs, vl);
 }
-vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m2_mu(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m2_mu(vm, vd, vs, vl);
 }
-vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m4_mu(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m4_mu(vm, vd, vs, vl);
 }
-vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f16m8_mu(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f16m8_mu(vm, vd, vs, vl);
 }
-vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32mf2_mu(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32mf2_mu(vm, vd, vs, vl);
 }
-vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m1_mu(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m1_mu(vm, vd, vs, vl);
 }
-vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m2_mu(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m2_mu(vm, vd, vs, vl);
 }
-vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m4_mu(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m4_mu(vm, vd, vs, vl);
 }
-vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f32m8_mu(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f32m8_mu(vm, vd, vs, vl);
 }
-vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m1_mu(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m1_mu(vm, vd, vs, vl);
 }
-vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m2_mu(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m2_mu(vm, vd, vs, vl);
 }
-vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m4_mu(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m4_mu(vm, vd, vs, vl);
 }
-vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfneg_v_f64m8_mu(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vfneg_v_f64m8_mu(vm, vd, vs, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfneg\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfnmacc.c b/auto-generated/policy_funcs/gnu-api-tests/vfnmacc.c
index e7133b7cc..b01a2d7a6 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfnmacc.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfnmacc.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8
   return __riscv_vfnmacc_vf_f64m8_tu(vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f16mf4_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16mf4_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f16mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f16m1_tum(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16m1_tum(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f16m2_tum(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16m2_tum(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f16m4_tum(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16m4_tum(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f16m8_tum(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16m8_tum(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m1_tum(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m2_tum(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m4_tum(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m8_tum(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m1_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m2_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m4_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m8_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfnmacc_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_tumu(mask, vd, vs1, 
vs2, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t 
test_vfnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t vm, 
vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m4_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m8_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m8_mu(mask, 
vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m1_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m2_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m4_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, 
vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m8_mu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m8_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m1_mu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m1_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m2_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m2_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m4_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m4_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m8_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m8_mu(vm, vd, rs1, vs2, vl);
 }
 
 vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6
   return __riscv_vfnmacc_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1,
vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t 
vl) { + return __riscv_vfnmacc_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return 
__riscv_vfnmacc_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); 
} -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, 
vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t 
test_vfnmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t 
test_vfnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t 
test_vfnmacc_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t 
vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmacc_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmacc_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmacc_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmacc_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmacc_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmacc\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfnmadd.c b/auto-generated/policy_funcs/gnu-api-tests/vfnmadd.c
index ebff4d2e6..b4a315838 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfnmadd.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfnmadd.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8
   return __riscv_vfnmadd_vf_f64m8_tu(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf4_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf4_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf4_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf4_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf2_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf2_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m1_tum(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m1_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m1_tum(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m1_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m2_tum(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m2_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m2_tum(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m2_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m4_tum(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m4_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m4_tum(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m4_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m8_tum(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m8_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m8_tum(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m8_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32mf2_tum(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32mf2_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m1_tum(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m1_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m2_tum(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m2_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m4_tum(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m4_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m8_tum(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m8_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m1_tum(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m1_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m2_tum(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m2_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m2_tum(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m2_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m4_tum(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m4_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m4_tum(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m4_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m8_tum(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m8_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m8_tum(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m8_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m1_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m1_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m2_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m2_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m4_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m4_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m8_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m8_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m1_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m1_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m2_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m2_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m4_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m4_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m8_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m8_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m1_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m1_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m1_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m2_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m2_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m2_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m4_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m4_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m4_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m8_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m8_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m8_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf4_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf4_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf4_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf4_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf2_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf2_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf2_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m1_mu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m1_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m1_mu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m1_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m2_mu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m2_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m2_mu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m2_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m4_mu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m4_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m4_mu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m4_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m8_mu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m8_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m8_mu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m8_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32mf2_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32mf2_mu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32mf2_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m1_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m1_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m1_mu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m1_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m2_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m2_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m2_mu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m2_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m4_mu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m4_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m4_mu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m4_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m8_mu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m8_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m8_mu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m8_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m1_mu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m1_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m1_mu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
  return __riscv_vfnmadd_vf_f64m1_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m2_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m2_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m2_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m2_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m4_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m4_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m4_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
  return __riscv_vfnmadd_vf_f64m4_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m8_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m8_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m8_mu(vm, vd, rs1, vs2, vl);
 }
 
 vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6
   return __riscv_vfnmadd_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
  return __riscv_vfnmadd_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfnmadd_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
  return __riscv_vfnmadd_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_vv_f16m4_rm_mu(mask, vd, vs1, vs2,
__RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_mu(vbool16_t mask, 
vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return 
__riscv_vfnmadd_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmadd\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfnmsac.c b/auto-generated/policy_funcs/gnu-api-tests/vfnmsac.c index b4ee019b4..d4d41aed2 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfnmsac.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfnmsac.c @@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmsac_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return 
__riscv_vfnmsac_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t 
test_vfnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, 
vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return 
__riscv_vfnmsac_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_tumu(vm, vd, rs1, 
vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t 
test_vfnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t mask, 
vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_mu(vm, 
vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, 
vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_mu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_mu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_mu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -606,364 +606,364 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6 
return __riscv_vfnmsac_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return 
__riscv_vfnmsac_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m2_rm_tum(vm, vd, vs1, 
vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t 
test_vfnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t 
test_vfnmsac_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t 
test_vfnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfnmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t 
test_vfnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_mu(vbool4_t mask, 
vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return 
__riscv_vfnmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t 
test_vfnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsac\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfnmsub.c b/auto-generated/policy_funcs/gnu-api-tests/vfnmsub.c index 1bc664719..993ae275f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfnmsub.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfnmsub.c @@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmsub_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_tum(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_tum(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t 
test_vfnmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, 
vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_tum(mask, vd, vs1, 
vs2, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m2_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t 
mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, 
vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { 
- return __riscv_vfnmsub_vf_f32m8_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m2_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_mu(vm, vd, 
vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t 
vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_mu(mask, vd, vs1, vs2, vl); 
+vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m2_mu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_mu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_mu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -606,364 +606,364 
@@ vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6 return __riscv_vfnmsub_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tum(vbool4_t mask, 
vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, 
vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return 
__riscv_vfnmsub_vf_f64m2_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_rm_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfnmsub_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return 
__riscv_vfnmsub_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return 
__riscv_vfnmsub_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m2_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return 
__riscv_vfnmsub_vf_f64m8_rm_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m2_rm_mu(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_mu(vbool16_t 
vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return 
__riscv_vfnmsub_vf_f64m2_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_rm_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsub\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfrdiv.c b/auto-generated/policy_funcs/gnu-api-tests/vfrdiv.c index a6ee93798..716b7774c 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfrdiv.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfrdiv.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfrdiv_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t 
maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t 
vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, 
vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t vm, 
vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, 
vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrdiv_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrdiv_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrdiv_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrdiv_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrdiv_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrdiv_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrdiv_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrdiv_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrdiv_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrdiv_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrdiv_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrdiv_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrdiv\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfrec7.c b/auto-generated/policy_funcs/gnu-api-tests/vfrec7.c
index 207c78cb2..dbc798f1a 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfrec7.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfrec7.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_tu(maskedoff, op1, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_tu(vd, vs2, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_tu(maskedoff, op1, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_tu(vd, vs2, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_tu(maskedoff, op1, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_tu(vd, vs2, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_tu(maskedoff, op1, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_tu(vd, vs2, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_tu(maskedoff, op1, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_tu(vd, vs2, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_tu(maskedoff, op1, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_tu(vd, vs2, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_tu(maskedoff, op1, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_tu(vd, vs2, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_tu(maskedoff, op1, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_tu(vd, vs2, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_tu(maskedoff, op1, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_tu(vd, vs2, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_tu(maskedoff, op1, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_tu(vd, vs2, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_tu(maskedoff, op1, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_tu(vd, vs2, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_tu(maskedoff, op1, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_tu(vd, vs2, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_tu(maskedoff, op1, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_tu(vd, vs2, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_tu(maskedoff, op1, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_tu(vd, vs2, vl);
 }
-vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_tu(maskedoff, op1, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_tu(vd, vs2, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_tum(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_tum(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_tum(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_tum(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_tum(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_tum(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_tum(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_tum(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_tum(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_tum(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_tum(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_tum(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_tum(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_tum(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_tum(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_tum(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_tum(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_tum(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_tum(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_tum(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_tum(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_tum(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_tum(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_tum(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_tum(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_tum(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_tum(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_tum(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_tum(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_tum(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_tumu(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_tumu(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_tumu(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_tumu(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_tumu(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_tumu(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_tumu(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_tumu(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_tumu(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_tumu(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_tumu(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_tumu(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_tumu(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_tumu(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_tumu(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_tumu(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_tumu(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_tumu(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_tumu(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_tumu(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_tumu(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_tumu(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_tumu(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_tumu(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_tumu(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_tumu(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_tumu(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_tumu(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_tumu(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_tumu(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_mu(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_mu(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_mu(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_mu(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_mu(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_mu(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_mu(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_mu(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_mu(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_mu(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_mu(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_mu(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_mu(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_mu(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_mu(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_mu(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_mu(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_mu(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_mu(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_mu(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_mu(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_mu(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_mu(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_mu(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_mu(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_mu(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_mu(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_mu(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_mu(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_mu(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f16m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f32m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_v_f64m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrec7\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfredmax.c b/auto-generated/policy_funcs/gnu-api-tests/vfredmax.c
index 4c1ae8304..2dc0ca0d0 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfredmax.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfredmax.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16mf2_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f16m2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t 
vl) { + return __riscv_vfredmax_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f32m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmax_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmax\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfredmin.c b/auto-generated/policy_funcs/gnu-api-tests/vfredmin.c index b6a501a11..6ecb2b2ac 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfredmin.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfredmin.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16mf2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t 
test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t mask, 
vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t 
test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredmin_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmin\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfredosum.c b/auto-generated/policy_funcs/gnu-api-tests/vfredosum.c index 0aafeec79..f6adaf148 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfredosum.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfredosum.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl); 
} -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t 
scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return 
__riscv_vfredosum_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t 
maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1_rm_tu(maskedoff, 
vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, 
vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t 
maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredosum\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfredusum.c b/auto-generated/policy_funcs/gnu-api-tests/vfredusum.c index df00febec..edb94aa6f 100644 --- 
a/auto-generated/policy_funcs/gnu-api-tests/vfredusum.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfredusum.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf4_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf2_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m1_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m2_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m4_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m8_f16m1_tu(maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32mf2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m1_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t 
vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m4_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m8_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m1_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m2_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m4_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m8_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m1_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t 
test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m2_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m4_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m8_f16m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return 
__riscv_vfredusum_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m1_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m1_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m2_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t 
maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m4_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m8_f16m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m8_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m1_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m4_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m8_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m1_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m2_f64m1_rm_tu(maskedoff, 
vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m4_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m8_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m1_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m1_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m2_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m4_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tum(vbool2_t mask, vfloat16m1_t maskedoff, 
vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f16m8_f16m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m8_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m1_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m4_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f32m8_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m1_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m2_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, 
vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m4_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_vs_f64m8_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredusum\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfrsqrt7.c b/auto-generated/policy_funcs/gnu-api-tests/vfrsqrt7.c index c9d926764..46ce68d77 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfrsqrt7.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfrsqrt7.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf4_tu(maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf2_tu(maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m1_tu(maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m2_tu(maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m4_tu(maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m8_tu(maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32mf2_tu(maskedoff, op1, vl); +vfloat32mf2_t 
test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m1_tu(maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m2_tu(maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m4_tu(maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m8_tu(maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m1_tu(maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m2_tu(maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m4_tu(maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m8_tu(maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf4_tum(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf2_tum(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m1_tum(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t mask, 
vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m2_tum(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m4_tum(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m8_tum(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32mf2_tum(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m1_tum(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m2_tum(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m4_tum(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m8_tum(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m1_tum(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m2_tum(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m4_tum(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, 
vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m8_tum(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf4_tumu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf2_tumu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m1_tumu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m2_tumu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m4_tumu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m8_tumu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32mf2_tumu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m1_tumu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m2_tumu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t 
test_vfrsqrt7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m4_tumu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m8_tumu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m1_tumu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m2_tumu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m4_tumu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m8_tumu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf4_mu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16mf2_mu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m1_mu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m2_mu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m4_mu(mask, maskedoff, op1, vl); +vfloat16m4_t 
test_vfrsqrt7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f16m8_mu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32mf2_mu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m1_mu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m2_mu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m4_mu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f32m8_mu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m1_mu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m2_mu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m4_mu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_v_f64m8_mu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_v_f64m8_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsqrt7\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfrsub.c b/auto-generated/policy_funcs/gnu-api-tests/vfrsub.c index b5977dd87..807888790 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfrsub.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfrsub.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t 
op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4_tum(mask, 
maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } 
-vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfrsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { 
+  return __riscv_vfrsub_vf_f16m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrsub_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrsub_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrsub_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrsub_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrsub_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrsub_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrsub_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrsub_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrsub_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrsub_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrsub_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrsub_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrsub_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsub\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfsgnj.c b/auto-generated/policy_funcs/gnu-api-tests/vfsgnj.c
index 40912ac1e..d9eec1cf9 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfsgnj.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfsgnj.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf4_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf4_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf4_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf2_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf2_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf2_tu(vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m1_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m1_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m1_tu(vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m2_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m2_tu(vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m2_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m2_tu(vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m4_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m4_tu(vd, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m4_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m4_tu(vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m8_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m8_tu(vd, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m8_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m8_tu(vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32mf2_tu(vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32mf2_tu(vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m1_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m1_tu(vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m2_tu(vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m2_tu(vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m4_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m4_tu(vd, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m4_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m4_tu(vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m8_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m8_tu(vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m8_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m8_tu(vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m1_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m1_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m1_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m1_tu(vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m2_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m2_tu(vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m2_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m2_tu(vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m4_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m4_tu(vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m4_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m4_tu(vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m8_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m8_tu(vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m8_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m8_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m2_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m4_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m8_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m8_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m1_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m2_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m4_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m4_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m8_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m8_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m2_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m4_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m8_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f64m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m1_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m2_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m4_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f16m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f16m8_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_vv_f32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsgnj_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsgnj_vf_f32mf2_mu(vm, vd, vs2, rs1, vl);
__riscv_vfsgnj_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m1_mu(mask, 
maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnj\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfsgnjn.c b/auto-generated/policy_funcs/gnu-api-tests/vfsgnjn.c index 1d2712b59..d6dbaea7e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfsgnjn.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfsgnjn.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, 
float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, 
vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, 
size_t vl) { + return __riscv_vfsgnjn_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return 
__riscv_vfsgnjn_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, 
vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); 
+vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return 
__riscv_vfsgnjn_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t 
test_vfsgnjn_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, 
float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t 
test_vfsgnjn_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfsgnjn_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t 
op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t 
vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjn\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfsgnjx.c b/auto-generated/policy_funcs/gnu-api-tests/vfsgnjx.c index 09c4deda1..a6e2edfb5 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfsgnjx.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfsgnjx.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf4_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf2_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - 
return __riscv_vfsgnjx_vf_f16m1_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m2_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m4_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m8_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32mf2_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfsgnjx_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return 
__riscv_vfsgnjx_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t 
test_vfsgnjx_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfsgnjx_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, 
vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf2_tumu(vm, vd, vs2, rs1, 
vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t 
op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); 
+vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, 
size_t vl) { + return __riscv_vfsgnjx_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m1_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m2_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m4_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, 
vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m8_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t 
vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjx\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfslide1down.c b/auto-generated/policy_funcs/gnu-api-tests/vfslide1down.c index 41eb1e4d6..b91acf85d 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfslide1down.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfslide1down.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf4_tu(maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf2_tu(maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m1_tu(maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m2_tu(maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m4_tu(maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m8_tu(maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32mf2_tu(maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m1_tu(maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m2_tu(maskedoff, 
src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m4_tu(maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m8_tu(maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m1_tu(maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m2_tu(maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m4_tu(maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m8_tu(maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf4_tum(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf2_tum(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m1_tum(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } 
-vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m2_tum(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m4_tum(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m8_tum(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32mf2_tum(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m1_tum(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m2_tum(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m4_tum(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m8_tum(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m1_tum(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return 
__riscv_vfslide1down_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m2_tum(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m4_tum(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m8_tum(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf4_tumu(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf2_tumu(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m1_tumu(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m2_tumu(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m4_tumu(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m8_tumu(mask, maskedoff, src, value, vl); +vfloat16m8_t 
test_vfslide1down_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32mf2_tumu(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m1_tumu(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m2_tumu(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m4_tumu(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m8_tumu(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m1_tumu(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m2_tumu(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m4_tumu(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - 
return __riscv_vfslide1down_vf_f64m8_tumu(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf4_mu(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16mf2_mu(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m1_mu(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m2_mu(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m4_mu(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_vf_f16m8_mu(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32mf2_mu(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m1_mu(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, 
vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m2_mu(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m4_mu(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_vf_f32m8_mu(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m1_mu(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m2_mu(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m4_mu(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_vf_f64m8_mu(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1down\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfslide1up.c b/auto-generated/policy_funcs/gnu-api-tests/vfslide1up.c index 273b5d386..41f52ba3e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfslide1up.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfslide1up.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf4_tu(maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfslide1up_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf2_tu(maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m1_tu(maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m2_tu(maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m4_tu(maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m8_tu(maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32mf2_tu(maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m1_tu(maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m2_tu(maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m4_tu(maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m8_tu(maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m8_tu(vd, vs2, rs1, vl); } 
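/* Usage sketch for the tail-undisturbed (_tu) vfslide1up form exercised
   by the tests above: element 0 of the result is rs1, element i is
   vs2[i-1], and elements at index >= vl are carried over from vd.  A
   minimal, illustrative example assuming <riscv_vector.h>; the function
   and buffer names here are not part of the generated suite. */
#include <riscv_vector.h>

void slide1up_tu_sketch(float *dst, const float *src, float head, size_t n) {
  size_t vl = __riscv_vsetvl_e32m1(n);
  vfloat32m1_t vd = __riscv_vle32_v_f32m1(dst, vl);  /* supplies the tail */
  vfloat32m1_t vs2 = __riscv_vle32_v_f32m1(src, vl);
  /* dst[0] = head, dst[i] = src[i-1] for 0 < i < vl. */
  vfloat32m1_t vr = __riscv_vfslide1up_vf_f32m1_tu(vd, vs2, head, vl);
  __riscv_vse32_v_f32m1(dst, vr, vl);
}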
-vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m1_tu(maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m2_tu(maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m4_tu(maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m8_tu(maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf4_tum(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf2_tum(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m1_tum(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m2_tum(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m4_tum(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m8_tum(mask, 
maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32mf2_tum(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m1_tum(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m2_tum(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m4_tum(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m8_tum(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m1_tum(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m2_tum(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m4_tum(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m8_tum(mask, maskedoff, src, value, 
vl); +vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf4_tumu(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf2_tumu(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m1_tumu(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m2_tumu(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m4_tumu(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m8_tumu(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32mf2_tumu(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m1_tumu(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return 
__riscv_vfslide1up_vf_f32m2_tumu(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m4_tumu(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m8_tumu(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m1_tumu(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m2_tumu(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m4_tumu(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m8_tumu(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf4_mu(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16mf2_mu(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - 
return __riscv_vfslide1up_vf_f16m1_mu(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m2_mu(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m4_mu(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_vf_f16m8_mu(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32mf2_mu(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m1_mu(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m2_mu(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m4_mu(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_vf_f32m8_mu(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m1_mu(mask, maskedoff, src, 
value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m2_mu(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m4_mu(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_vf_f64m8_mu(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1up\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfsqrt.c b/auto-generated/policy_funcs/gnu-api-tests/vfsqrt.c index 6404624c9..cae322dfb 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfsqrt.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfsqrt.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_tu(maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_tu(maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_tu(maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_tu(maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_tu(maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_tu(maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + 
return __riscv_vfsqrt_v_f16m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_tu(maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_tu(maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_tu(maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_tu(maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_tu(maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_tu(maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_tu(maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_tu(maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8_tu(maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_tum(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_tum(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_tum(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, 
size_t vl) { + return __riscv_vfsqrt_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_tum(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_tum(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_tum(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_tum(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_tum(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_tum(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_tum(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_tum(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_tum(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_tum(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_tum(mask, maskedoff, op1, vl); +vfloat64m4_t 
test_vfsqrt_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8_tum(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_tumu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_tumu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_tumu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_tumu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_tumu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_tumu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_tumu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_tumu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_tumu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t 
maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_tumu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_tumu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_tumu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_tumu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_tumu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8_tumu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_mu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_mu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_mu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_mu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_mu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_mu(vm, vd, vs2, vl); } 
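/* A hand-written sketch of the mask-undisturbed (_mu) vfsqrt form tested
   above: where the mask bit is set the result is sqrt(vs2[i]); where it
   is clear the element is kept from vd.  Illustrative only, assuming
   <riscv_vector.h>; the loop structure and names are not from the
   generated tests. */
#include <riscv_vector.h>

void sqrt_nonneg_only(float *buf, size_t n) {
  for (size_t done = 0; done < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - done);
    vfloat32m1_t v = __riscv_vle32_v_f32m1(buf + done, vl);
    /* Take the square root only of non-negative lanes... */
    vbool32_t vm = __riscv_vmfge_vf_f32m1_b32(v, 0.0f, vl);
    /* ...and leave negative lanes untouched (mu policy, vd = v). */
    vfloat32m1_t r = __riscv_vfsqrt_v_f32m1_mu(vm, v, v, vl);
    __riscv_vse32_v_f32m1(buf + done, r, vl);
    done += vl;
  }
}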
-vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_mu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_mu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_mu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_mu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_mu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_mu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_mu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_mu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_mu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8_mu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_rm_tu(vd, 
vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m1_t 
test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8_rm_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16mf2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32mf2_rm_tum(mask, 
maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m1_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m2_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m4_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m8_rm_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16mf4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32mf2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m1_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m2_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m4_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m8_rm_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16mf4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f16m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32mf2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f32m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m1_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m2_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m4_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_v_f64m8_rm_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsqrt\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfsub.c b/auto-generated/policy_funcs/gnu-api-tests/vfsub.c
index 4b393e903..7154b86a7 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfsub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfsub.c
@@ -6,964 +6,964 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsub_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsub_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsub_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsub_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsub_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsub_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsub_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsub_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfsub_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfsub_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfsub_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfsub_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfsub_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfsub_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfsub_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfsub_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfsub_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfsub_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfsub_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfsub_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfsub_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfsub_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfsub_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfsub_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsub_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsub_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsub_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsub_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsub_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfsub_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfsub_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsub_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t
test_vfsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t 
test_vfsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_vv_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_vv_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16m2_rm_tumu(vm, 
vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfsub_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t 
vl) { + return __riscv_vfsub_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_vv_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t 
op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_vv_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { 
+ return __riscv_vfsub_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfsub_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_vf_f64m4_rm_mu(vm, vd, vs2, rs1, 
__RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return __riscv_vfsub_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+ return __riscv_vfsub_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
- return __riscv_vfsub_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+ return __riscv_vfsub_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsub\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwadd.c b/auto-generated/policy_funcs/gnu-api-tests/vfwadd.c
index 568bda429..90e5ece2c 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfwadd.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfwadd.c
@@ -6,1156 +6,1156 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return __riscv_vfwadd_vv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_f32mf2_tu(vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwadd_vf_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwadd_vf_f32mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return __riscv_vfwadd_wv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_wv_f32mf2_tu(vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwadd_wf_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwadd_wf_f32mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return __riscv_vfwadd_vv_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_f32m1_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwadd_vf_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwadd_vf_f32m1_tu(vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return
__riscv_vfwadd_wv_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m1_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t 
test_vfwadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, 
float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t 
test_vfwadd_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
  + return __riscv_vfwadd_wf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return
__riscv_vfwadd_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, 
vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfwadd_wv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return 
__riscv_vfwadd_wv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t 
vl) { + return __riscv_vfwadd_wv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfwadd_wf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfwadd_wf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwadd\.[ivxfswum.]+\s+} 288 } } */ 
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwcvt.c b/auto-generated/policy_funcs/gnu-api-tests/vfwcvt.c index f27055435..2208c6582 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfwcvt.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfwcvt.c @@ -6,1204 +6,1204 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf4_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf2_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m1_tu(maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m2_tu(maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m8_tu(maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf4_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf2_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m1_tu(maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m2_tu(maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t 
test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m8_tu(maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_tu(vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_tu(maskedoff, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_tu(maskedoff, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return 
__riscv_vfwcvt_f_x_v_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m8_tu(maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m8_tu(maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t 
test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m8_tu(maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m8_tu(vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_tu(maskedoff, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_tu(vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_tu(maskedoff, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_tu(vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_tu(maskedoff, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_tu(vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_tu(maskedoff, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_tu(maskedoff, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_tu(maskedoff, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_tu(maskedoff, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_tu(maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m1_tu(maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m1_tu(vd, vs2, vl); 
} -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m2_tu(maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m4_tu(maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m8_tu(maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m1_tu(maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m2_tu(maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m4_tu(maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m8_tu(maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m1_tu(maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m2_tu(maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m4_tu(maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m8_tu(maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t vm, 
vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m8_tum(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf4_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf2_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m1_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m2_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m4_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m4_tum(vm, vd, vs2, vl); } 
-vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m8_tum(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return 
__riscv_vfwcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m8_tum(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m8_tum(mask, maskedoff, src, vl); +vfloat32m8_t 
test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32mf2_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m1_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m2_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m4_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m8_tum(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_tum(vm, 
vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m1_tum(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m2_tum(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m4_tum(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m8_tum(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m1_tum(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m2_tum(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m4_tum(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t 
vl) { - return __riscv_vfwcvt_f_xu_v_f64m8_tum(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m1_tum(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m2_tum(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m4_tum(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m8_tum(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m8_tumu(mask, maskedoff, src, vl); +vfloat16m8_t 
test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf4_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf2_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m1_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m2_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m4_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m8_tumu(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t 
vl) { + return __riscv_vfwcvt_x_f_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t 
test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m8_tumu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m8_tumu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32mf2_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m1_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m2_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m4_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t mask, 
vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m8_tumu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m1_tumu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m2_tumu(mask, 
maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m4_tumu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m8_tumu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m1_tumu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m2_tumu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m4_tumu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m8_tumu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m1_tumu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m2_tumu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m4_tumu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m8_tumu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t vm, 
vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf4_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16mf2_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m1_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf4_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16mf2_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m1_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t 
test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m4_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); +vuint32m4_t 
test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32mf2_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m1_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32mf2_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m1_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m4_mu(vm, vd, vs2, vl); } 
-vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32mf2_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m1_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m2_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m4_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m8_mu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); 
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m1_mu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m1_mu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m4_mu(vm, vd, vs2, vl); } 
-vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m1_mu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m2_mu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m4_mu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m8_mu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return 
__riscv_vfwcvt_xu_f_v_u32mf2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t 
test_vfwcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_rm_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); 
} -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m1_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t mask, vuint64m4_t 
-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m4_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m8_rm_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
  return __riscv_vfwcvt_x_f_v_i64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m1_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m2_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m4_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m8_rm_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i32m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32mf2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u32m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_f_v_i64m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_f_v_i64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m1_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m2_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m4_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_f_v_u64m8_rm_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_f_v_u64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.[ivxfswum.]+\s+} 300 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwcvt_rtz.c b/auto-generated/policy_funcs/gnu-api-tests/vfwcvt_rtz.c
index 81b1a5f06..8e858fb57 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfwcvt_rtz.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfwcvt_rtz.c
@@ -6,292 +6,292 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tu(vd, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m1_tu(vd, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m2_tu(vd, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m4_tu(vd, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m8_tu(vd, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tu(vd, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tu(vd, vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tu(vd, vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tu(vd, vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tu(vd, vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m1_tu(vd, vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m2_tu(vd, vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m4_tu(vd, vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m8_tu(vd, vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tu(vd, vs2, vl);
 }
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tu(vd, vs2, vl);
 }
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tu(vd, vs2, vl);
 }
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tu(vd, vs2, vl);
 }
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tum(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m1_tum(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m2_tum(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m4_tum(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m8_tum(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tum(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tum(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tum(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tum(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tum(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m1_tum(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m2_tum(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m4_tum(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m8_tum(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tum(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tum(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tum(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tum(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tumu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m1_tumu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m2_tumu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m4_tumu(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m8_tumu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tumu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tumu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tumu(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tumu(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m1_tumu(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m2_tumu(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m4_tumu(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m8_tumu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tumu(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tumu(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tumu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tumu(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32mf2_mu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m1_mu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m2_mu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m4_mu(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i32m8_mu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_mu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m1_mu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m2_mu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m4_mu(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u32m8_mu(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m1_mu(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m2_mu(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
  return __riscv_vfwcvt_rtz_x_f_v_i64m4_mu(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_f_v_i64m8_mu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m1_mu(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m2_mu(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m4_mu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_f_v_u64m8_mu(vm, vd, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.rtz[ivxfswum.]*\s+} 72 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwmacc.c b/auto-generated/policy_funcs/gnu-api-tests/vfwmacc.c
index 5c47f8518..3b5d2d935 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfwmacc.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfwmacc.c
@@ -78,220 +78,220 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m4
   return __riscv_vfwmacc_vf_f64m8_tu(vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m2_tum(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m4_tum(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m8_tum(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m1_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m2_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m4_mu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m8_mu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m1_mu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m2_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m4_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m4_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
__riscv_vfwmacc_vf_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat3 return __riscv_vfwmacc_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - 
return __riscv_vfwmacc_vf_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m4_rm_tum(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwmacc_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwmacc_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmacc_vf_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmacc\.[ivxfswum.]+\s+} 144 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwmsac.c b/auto-generated/policy_funcs/gnu-api-tests/vfwmsac.c
index 9c1c17009..6f75ff665 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfwmsac.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfwmsac.c
@@ -78,220 +78,220 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m4
   return __riscv_vfwmsac_vf_f64m8_tu(vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32mf2_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m1_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m2_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m4_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m8_tum(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m1_tum(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m2_tum(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m4_tum(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m8_tum(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m2_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m4_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m8_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32mf2_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m1_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m1_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m2_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m2_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return 
__riscv_vfwmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat3 return __riscv_vfwmsac_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m4_rm_tum(mask, vd, 
vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfwmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t 
test_vfwmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfwmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t 
+vfloat32m2_t test_vfwmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfwmsac_vf_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmsac\.[ivxfswum.]+\s+} 144 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwmul.c b/auto-generated/policy_funcs/gnu-api-tests/vfwmul.c
index 44d492cfe..d419635ce 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfwmul.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfwmul.c
@@ -6,580 +6,580 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_tu(vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_tu(vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_tu(vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_tu(vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_tu(vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_tu(vd, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_tu(vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_tu(vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_tu(vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_tu(vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_tu(vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_tu(vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m4_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m4_tu(vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m4_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m4_tu(vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m8_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m8_tu(vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m8_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m8_tu(vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m4_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m4_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m8_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m4_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwmul_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmul_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwmul_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmul_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwmul_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmul_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwmul_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmul_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfwmul_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfwmul_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfwmul_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfwmul_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfwmul_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwmul_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmul_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwmul_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwmul_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
test_vfwmul_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmul\.[ivxfswum.]+\s+} 144 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwnmacc.c b/auto-generated/policy_funcs/gnu-api-tests/vfwnmacc.c index f966297fc..caa889d92 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfwnmacc.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfwnmacc.c @@ -78,220 +78,220 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m return __riscv_vfwnmacc_vf_f64m8_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_tum(mask, vd, vs1, vs2, 
vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t 
test_vfwnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t 
test_vfwnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t 
test_vfwnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t 
test_vfwnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t 
vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat return __riscv_vfwnmacc_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return 
__riscv_vfwnmacc_vf_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return 
__riscv_vfwnmacc_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, 
size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t 
vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, 
vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return 
__riscv_vfwnmacc_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmacc\.[ivxfswum.]+\s+} 144 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwnmsac.c b/auto-generated/policy_funcs/gnu-api-tests/vfwnmsac.c index adc12b0db..828705f30 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfwnmsac.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfwnmsac.c @@ -78,220 +78,220 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m return __riscv_vfwnmsac_vf_f64m8_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t 
test_vfwnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m1_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m1_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t vm, 
vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, 
vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m1_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, 
float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t 
vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m1_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_mu(vm, vd, vs1, 
vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat return __riscv_vfwnmsac_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return 
__riscv_vfwnmsac_vf_f32m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return 
__riscv_vfwnmsac_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_rm_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - 
return __riscv_vfwnmsac_vv_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t 
vl) { + return __riscv_vfwnmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_rm_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t 
vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfwnmsac_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_rm_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmsac\.[ivxfswum.]+\s+} 144 } } */ diff --git 
a/auto-generated/policy_funcs/gnu-api-tests/vfwredosum.c b/auto-generated/policy_funcs/gnu-api-tests/vfwredosum.c index 00213cd89..d8be4cc04 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfwredosum.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfwredosum.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf4_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m1_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32mf2_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32mf2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, 
vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m2_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, 
vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32mf2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32mf2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m1_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_rm_tu(vd, vs2, 
vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tum(vbool64_t 
mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m1_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t 
test_vfwredosum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredosum\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwredusum.c b/auto-generated/policy_funcs/gnu-api-tests/vfwredusum.c index d965c7ce6..c564aa697 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vfwredusum.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vfwredusum.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf4_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf2_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m1_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m2_f32m1_tu(maskedoff, vector, scalar, vl); 
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m4_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m8_f32m1_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32mf2_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32mf2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m1_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m2_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m4_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m8_f64m1_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf2_f32m1_tum(mask, maskedoff, vector, scalar, 
vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m1_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m2_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m4_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m8_f32m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32mf2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32mf2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m1_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m2_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m4_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, 
vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m8_f64m1_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tu(vd, vs2, vs1, 
__RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, 
vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredusum\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vfwsub.c b/auto-generated/policy_funcs/gnu-api-tests/vfwsub.c
index bf809e768..eee6a3a6c 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vfwsub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vfwsub.c
@@ -6,1156 +6,1156 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return __riscv_vfwsub_vv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_f32mf2_tu(vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwsub_vf_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwsub_vf_f32mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
- return __riscv_vfwsub_wv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_f32mf2_tu(vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwsub_wf_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwsub_wf_f32mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return __riscv_vfwsub_vv_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_f32m1_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwsub_vf_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwsub_vf_f32m1_tu(vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
- return __riscv_vfwsub_wv_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_f32m1_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
- return __riscv_vfwsub_wf_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfwsub_wf_f32m1_tu(vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return __riscv_vfwsub_vv_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_f32m2_tu(vd, vs2, vs1, vl);
 }
-vfloat32m2_t
test_vfwsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, 
size_t vl) { + return __riscv_vfwsub_wf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_tu(maskedoff, op1, 
op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_tum(mask, maskedoff, 
op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_tum(vm, vd, 
vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfwsub_wf_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + 
return __riscv_vfwsub_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t 
mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_tumu(mask, maskedoff, op1, op2, vl); 
+vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return 
__riscv_vfwsub_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, 
vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t vm, 
vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, 
float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) 
{ + return __riscv_vfwsub_wv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return 
__riscv_vfwsub_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t 
test_vfwsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_rm_tu(vd, vs2, vs1, 
__RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return 
__riscv_vfwsub_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); 
+vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfwsub_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t 
vl) { - return __riscv_vfwsub_wv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t 
vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t 
maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_rm_tumu(mask, 
maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { 
+ return __riscv_vfwsub_wf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t 
maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); 
+vfloat64m8_t test_vfwsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return 
__riscv_vfwsub_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfwsub_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfwsub_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return 
__riscv_vfwsub_wv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwsub\.[ivxfswum.]+\s+} 288 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vid.c b/auto-generated/policy_funcs/gnu-api-tests/vid.c index e4b864280..49ab58dd8 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vid.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vid.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf8_tu(maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t vd, size_t vl) { + return __riscv_vid_v_u8mf8_tu(vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf4_tu(maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t vd, size_t vl) { + return __riscv_vid_v_u8mf4_tu(vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf2_tu(maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t vd, size_t vl) { + return __riscv_vid_v_u8mf2_tu(vd, vl); } -vuint8m1_t 
test_vid_v_u8m1_tu(vuint8m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m1_tu(maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_tu(vuint8m1_t vd, size_t vl) { + return __riscv_vid_v_u8m1_tu(vd, vl); } -vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m2_tu(maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t vd, size_t vl) { + return __riscv_vid_v_u8m2_tu(vd, vl); } -vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m4_tu(maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t vd, size_t vl) { + return __riscv_vid_v_u8m4_tu(vd, vl); } -vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m8_tu(maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t vd, size_t vl) { + return __riscv_vid_v_u8m8_tu(vd, vl); } -vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf4_tu(maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t vd, size_t vl) { + return __riscv_vid_v_u16mf4_tu(vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_tu(vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf2_tu(maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_tu(vuint16mf2_t vd, size_t vl) { + return __riscv_vid_v_u16mf2_tu(vd, vl); } -vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m1_tu(maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t vd, size_t vl) { + return __riscv_vid_v_u16m1_tu(vd, vl); } -vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m2_tu(maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t vd, size_t vl) { + return __riscv_vid_v_u16m2_tu(vd, vl); } -vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m4_tu(maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t vd, size_t vl) { + return __riscv_vid_v_u16m4_tu(vd, vl); } -vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m8_tu(maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t vd, size_t vl) { + return __riscv_vid_v_u16m8_tu(vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32mf2_tu(maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t vd, size_t vl) { + return __riscv_vid_v_u32mf2_tu(vd, vl); } -vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m1_tu(maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t vd, size_t vl) { + return __riscv_vid_v_u32m1_tu(vd, vl); } -vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m2_tu(maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t vd, size_t vl) { + return __riscv_vid_v_u32m2_tu(vd, vl); } -vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m4_tu(maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t vd, size_t vl) { + return __riscv_vid_v_u32m4_tu(vd, vl); } -vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m8_tu(maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t vd, size_t vl) { + return __riscv_vid_v_u32m8_tu(vd, vl); } -vuint64m1_t test_vid_v_u64m1_tu(vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m1_tu(maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_tu(vuint64m1_t vd, size_t vl) { + return __riscv_vid_v_u64m1_tu(vd, vl); } -vuint64m2_t 
test_vid_v_u64m2_tu(vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m2_tu(maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_tu(vuint64m2_t vd, size_t vl) { + return __riscv_vid_v_u64m2_tu(vd, vl); } -vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m4_tu(maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t vd, size_t vl) { + return __riscv_vid_v_u64m4_tu(vd, vl); } -vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m8_tu(maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t vd, size_t vl) { + return __riscv_vid_v_u64m8_tu(vd, vl); } -vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf8_tum(mask, maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, size_t vl) { + return __riscv_vid_v_u8mf8_tum(vm, vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf4_tum(mask, maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, size_t vl) { + return __riscv_vid_v_u8mf4_tum(vm, vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf2_tum(mask, maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, size_t vl) { + return __riscv_vid_v_u8mf2_tum(vm, vd, vl); } -vuint8m1_t test_vid_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m1_tum(mask, maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, size_t vl) { + return __riscv_vid_v_u8m1_tum(vm, vd, vl); } -vuint8m2_t test_vid_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m2_tum(mask, maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, size_t vl) { + return __riscv_vid_v_u8m2_tum(vm, vd, vl); } -vuint8m4_t test_vid_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m4_tum(mask, maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, size_t vl) { + return __riscv_vid_v_u8m4_tum(vm, vd, vl); } -vuint8m8_t test_vid_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m8_tum(mask, maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, size_t vl) { + return __riscv_vid_v_u8m8_tum(vm, vd, vl); } -vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf4_tum(mask, maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, size_t vl) { + return __riscv_vid_v_u16mf4_tum(vm, vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf2_tum(mask, maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, size_t vl) { + return __riscv_vid_v_u16mf2_tum(vm, vd, vl); } -vuint16m1_t test_vid_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m1_tum(mask, maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, size_t vl) { + return __riscv_vid_v_u16m1_tum(vm, vd, vl); } -vuint16m2_t test_vid_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m2_tum(mask, maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, size_t vl) { + return __riscv_vid_v_u16m2_tum(vm, vd, vl); } 
-vuint16m4_t test_vid_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m4_tum(mask, maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, size_t vl) { + return __riscv_vid_v_u16m4_tum(vm, vd, vl); } -vuint16m8_t test_vid_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m8_tum(mask, maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, size_t vl) { + return __riscv_vid_v_u16m8_tum(vm, vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32mf2_tum(mask, maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, size_t vl) { + return __riscv_vid_v_u32mf2_tum(vm, vd, vl); } -vuint32m1_t test_vid_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m1_tum(mask, maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, size_t vl) { + return __riscv_vid_v_u32m1_tum(vm, vd, vl); } -vuint32m2_t test_vid_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m2_tum(mask, maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, size_t vl) { + return __riscv_vid_v_u32m2_tum(vm, vd, vl); } -vuint32m4_t test_vid_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m4_tum(mask, maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, size_t vl) { + return __riscv_vid_v_u32m4_tum(vm, vd, vl); } -vuint32m8_t test_vid_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m8_tum(mask, maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, size_t vl) { + return __riscv_vid_v_u32m8_tum(vm, vd, vl); } -vuint64m1_t test_vid_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m1_tum(mask, maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, size_t vl) { + return __riscv_vid_v_u64m1_tum(vm, vd, vl); } -vuint64m2_t test_vid_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m2_tum(mask, maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, size_t vl) { + return __riscv_vid_v_u64m2_tum(vm, vd, vl); } -vuint64m4_t test_vid_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m4_tum(mask, maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, size_t vl) { + return __riscv_vid_v_u64m4_tum(vm, vd, vl); } -vuint64m8_t test_vid_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m8_tum(mask, maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, size_t vl) { + return __riscv_vid_v_u64m8_tum(vm, vd, vl); } -vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf8_tumu(mask, maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, size_t vl) { + return __riscv_vid_v_u8mf8_tumu(vm, vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf4_tumu(mask, maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, size_t vl) { + return __riscv_vid_v_u8mf4_tumu(vm, vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - 
return __riscv_vid_v_u8mf2_tumu(mask, maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, size_t vl) { + return __riscv_vid_v_u8mf2_tumu(vm, vd, vl); } -vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m1_tumu(mask, maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, size_t vl) { + return __riscv_vid_v_u8m1_tumu(vm, vd, vl); } -vuint8m2_t test_vid_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m2_tumu(mask, maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, size_t vl) { + return __riscv_vid_v_u8m2_tumu(vm, vd, vl); } -vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m4_tumu(mask, maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, size_t vl) { + return __riscv_vid_v_u8m4_tumu(vm, vd, vl); } -vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m8_tumu(mask, maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, size_t vl) { + return __riscv_vid_v_u8m8_tumu(vm, vd, vl); } -vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf4_tumu(mask, maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, size_t vl) { + return __riscv_vid_v_u16mf4_tumu(vm, vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf2_tumu(mask, maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, size_t vl) { + return __riscv_vid_v_u16mf2_tumu(vm, vd, vl); } -vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m1_tumu(mask, maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, size_t vl) { + return __riscv_vid_v_u16m1_tumu(vm, vd, vl); } -vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m2_tumu(mask, maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, size_t vl) { + return __riscv_vid_v_u16m2_tumu(vm, vd, vl); } -vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m4_tumu(mask, maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, size_t vl) { + return __riscv_vid_v_u16m4_tumu(vm, vd, vl); } -vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m8_tumu(mask, maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, size_t vl) { + return __riscv_vid_v_u16m8_tumu(vm, vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32mf2_tumu(mask, maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, size_t vl) { + return __riscv_vid_v_u32mf2_tumu(vm, vd, vl); } -vuint32m1_t test_vid_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m1_tumu(mask, maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, size_t vl) { + return __riscv_vid_v_u32m1_tumu(vm, vd, vl); } -vuint32m2_t test_vid_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m2_tumu(mask, maskedoff, vl); +vuint32m2_t 
test_vid_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, size_t vl) { + return __riscv_vid_v_u32m2_tumu(vm, vd, vl); } -vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m4_tumu(mask, maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, size_t vl) { + return __riscv_vid_v_u32m4_tumu(vm, vd, vl); } -vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m8_tumu(mask, maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, size_t vl) { + return __riscv_vid_v_u32m8_tumu(vm, vd, vl); } -vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m1_tumu(mask, maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, size_t vl) { + return __riscv_vid_v_u64m1_tumu(vm, vd, vl); } -vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m2_tumu(mask, maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, size_t vl) { + return __riscv_vid_v_u64m2_tumu(vm, vd, vl); } -vuint64m4_t test_vid_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m4_tumu(mask, maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, size_t vl) { + return __riscv_vid_v_u64m4_tumu(vm, vd, vl); } -vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m8_tumu(mask, maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, size_t vl) { + return __riscv_vid_v_u64m8_tumu(vm, vd, vl); } -vuint8mf8_t test_vid_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf8_mu(mask, maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, size_t vl) { + return __riscv_vid_v_u8mf8_mu(vm, vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf4_mu(mask, maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, size_t vl) { + return __riscv_vid_v_u8mf4_mu(vm, vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u8mf2_mu(mask, maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, size_t vl) { + return __riscv_vid_v_u8mf2_mu(vm, vd, vl); } -vuint8m1_t test_vid_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m1_mu(mask, maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, size_t vl) { + return __riscv_vid_v_u8m1_mu(vm, vd, vl); } -vuint8m2_t test_vid_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m2_mu(mask, maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, size_t vl) { + return __riscv_vid_v_u8m2_mu(vm, vd, vl); } -vuint8m4_t test_vid_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m4_mu(mask, maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, size_t vl) { + return __riscv_vid_v_u8m4_mu(vm, vd, vl); } -vuint8m8_t test_vid_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u8m8_mu(mask, maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, size_t vl) { + return __riscv_vid_v_u8m8_mu(vm, vd, vl); } -vuint16mf4_t 
test_vid_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf4_mu(mask, maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, size_t vl) { + return __riscv_vid_v_u16mf4_mu(vm, vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16mf2_mu(mask, maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, size_t vl) { + return __riscv_vid_v_u16mf2_mu(vm, vd, vl); } -vuint16m1_t test_vid_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m1_mu(mask, maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, size_t vl) { + return __riscv_vid_v_u16m1_mu(vm, vd, vl); } -vuint16m2_t test_vid_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m2_mu(mask, maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, size_t vl) { + return __riscv_vid_v_u16m2_mu(vm, vd, vl); } -vuint16m4_t test_vid_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m4_mu(mask, maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, size_t vl) { + return __riscv_vid_v_u16m4_mu(vm, vd, vl); } -vuint16m8_t test_vid_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u16m8_mu(mask, maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, size_t vl) { + return __riscv_vid_v_u16m8_mu(vm, vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32mf2_mu(mask, maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, size_t vl) { + return __riscv_vid_v_u32mf2_mu(vm, vd, vl); } -vuint32m1_t test_vid_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m1_mu(mask, maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, size_t vl) { + return __riscv_vid_v_u32m1_mu(vm, vd, vl); } -vuint32m2_t test_vid_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m2_mu(mask, maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, size_t vl) { + return __riscv_vid_v_u32m2_mu(vm, vd, vl); } -vuint32m4_t test_vid_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m4_mu(mask, maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, size_t vl) { + return __riscv_vid_v_u32m4_mu(vm, vd, vl); } -vuint32m8_t test_vid_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u32m8_mu(mask, maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, size_t vl) { + return __riscv_vid_v_u32m8_mu(vm, vd, vl); } -vuint64m1_t test_vid_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m1_mu(mask, maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, size_t vl) { + return __riscv_vid_v_u64m1_mu(vm, vd, vl); } -vuint64m2_t test_vid_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m2_mu(mask, maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, size_t vl) { + return __riscv_vid_v_u64m2_mu(vm, vd, vl); } -vuint64m4_t test_vid_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m4_mu(mask, maskedoff, vl); 
+vuint64m4_t test_vid_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, size_t vl) { + return __riscv_vid_v_u64m4_mu(vm, vd, vl); } -vuint64m8_t test_vid_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_v_u64m8_mu(mask, maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, size_t vl) { + return __riscv_vid_v_u64m8_mu(vm, vd, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vid\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/viota.c b/auto-generated/policy_funcs/gnu-api-tests/viota.c index 956dd2d64..19031b92c 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/viota.c +++ b/auto-generated/policy_funcs/gnu-api-tests/viota.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u8mf8_tu(maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u8mf8_tu(vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u8mf4_tu(maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u8mf2_tu(maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u8mf2_tu(vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u8m1_tu(maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u8m1_tu(vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u8m2_tu(maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u8m2_tu(vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u8m4_tu(maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u8m4_tu(vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_m_u8m8_tu(maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_m_u8m8_tu(vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u16mf4_tu(maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u16mf2_tu(maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u16m1_tu(maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return 
__riscv_viota_m_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u16m2_tu(maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u16m4_tu(maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u16m8_tu(maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u32mf2_tu(maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_tu(vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u32m1_tu(maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_tu(vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u32m2_tu(maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u32m4_tu(maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u32m8_tu(maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tu(vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u64m1_tu(maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_tu(vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u64m2_tu(maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u64m4_tu(maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u64m8_tu(maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u8mf8_tum(mask, maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return 
__riscv_viota_m_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u8mf4_tum(mask, maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u8mf2_tum(mask, maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u8m1_tum(mask, maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u8m2_tum(mask, maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u8m4_tum(mask, maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_m_u8m8_tum(mask, maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_m_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u16mf4_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u16mf2_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u16m1_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u16m2_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u16m4_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t 
vl) { - return __riscv_viota_m_u16m8_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u32mf2_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u32m1_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u32m2_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u32m4_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u32m8_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u64m1_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u64m2_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u64m4_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u64m8_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u8mf8_tumu(mask, maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u8mf4_tumu(mask, maskedoff, op1, vl); +vuint8mf4_t 
test_viota_m_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u8mf2_tumu(mask, maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u8m1_tumu(mask, maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u8m2_tumu(mask, maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u8m4_tumu(mask, maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_m_u8m8_tumu(mask, maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_m_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u16mf4_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u16mf2_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u16m1_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u16m2_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u16m4_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u16m8_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return 
__riscv_viota_m_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u32mf2_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u32m1_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u32m2_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u32m4_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u32m8_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u64m1_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u64m2_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u64m4_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u64m8_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u8mf8_mu(mask, maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u8mf4_mu(mask, maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t 
test_viota_m_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u8mf2_mu(mask, maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u8m1_mu(mask, maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u8m2_mu(mask, maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u8m4_mu(mask, maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_m_u8m8_mu(mask, maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_m_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u16mf4_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u16mf2_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u16m1_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u16m2_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u16m4_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u16m8_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_m_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u32mf2_mu(mask, maskedoff, op1, vl); +vuint32mf2_t 
test_viota_m_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u32m1_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u32m2_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u32m4_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u32m8_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_m_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u64m1_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_m_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u64m2_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_m_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u64m4_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_m_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u64m8_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_m_u64m8_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+viota\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle16.c b/auto-generated/policy_funcs/gnu-api-tests/vle16.c index cc189fcd6..4b5c0684f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vle16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vle16.c @@ -6,292 +6,292 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16mf4_tu(maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf4_tu(vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) { - return 
__riscv_vle16_v_f16mf2_tu(maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf2_tu(vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m1_tu(maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m1_tu(vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m2_tu(maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m2_tu(vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m4_tu(maskedoff, base, vl); +vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m4_tu(vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m8_tu(maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m8_tu(vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16mf4_tu(maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16mf4_tu(vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16mf2_tu(maskedoff, base, vl); +vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16mf2_tu(vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m1_tu(maskedoff, base, vl); +vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m1_tu(vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m2_tu(maskedoff, base, vl); +vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m2_tu(vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m4_tu(maskedoff, base, vl); +vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m4_tu(vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16m8_tu(maskedoff, base, vl); +vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16m8_tu(vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16mf4_tu(maskedoff, base, vl); +vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16mf4_tu(vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16mf2_tu(maskedoff, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, 
size_t vl) { + return __riscv_vle16_v_u16mf2_tu(vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m1_tu(maskedoff, base, vl); +vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m1_tu(vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m2_tu(maskedoff, base, vl); +vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m2_tu(vd, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m4_tu(maskedoff, base, vl); +vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m4_tu(vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_v_u16m8_tu(maskedoff, base, vl); +vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_v_u16m8_tu(vd, rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16mf4_tum(mask, maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf4_tum(vm, vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16mf2_tum(mask, maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16mf2_tum(vm, vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m1_tum(mask, maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m1_tum(vm, vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m2_tum(mask, maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m2_tum(vm, vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m4_tum(mask, maskedoff, base, vl); +vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m4_tum(vm, vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_v_f16m8_tum(mask, maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_v_f16m8_tum(vm, vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_v_i16mf4_tum(mask, maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_v_i16mf4_tum(vm, vd, rs1, vl); } 
-vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16mf2_tum(mask, maskedoff, base, vl);
+vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16mf2_tum(vm, vd, rs1, vl);
 }

-vint16m1_t test_vle16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m1_tum(mask, maskedoff, base, vl);
+vint16m1_t test_vle16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m1_tum(vm, vd, rs1, vl);
 }

-vint16m2_t test_vle16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m2_tum(mask, maskedoff, base, vl);
+vint16m2_t test_vle16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m2_tum(vm, vd, rs1, vl);
 }

-vint16m4_t test_vle16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m4_tum(mask, maskedoff, base, vl);
+vint16m4_t test_vle16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m4_tum(vm, vd, rs1, vl);
 }

-vint16m8_t test_vle16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m8_tum(mask, maskedoff, base, vl);
+vint16m8_t test_vle16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m8_tum(vm, vd, rs1, vl);
 }

-vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16mf4_tum(mask, maskedoff, base, vl);
+vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16mf4_tum(vm, vd, rs1, vl);
 }

-vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16mf2_tum(mask, maskedoff, base, vl);
+vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16mf2_tum(vm, vd, rs1, vl);
 }

-vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m1_tum(mask, maskedoff, base, vl);
+vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m1_tum(vm, vd, rs1, vl);
 }

-vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m2_tum(mask, maskedoff, base, vl);
+vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m2_tum(vm, vd, rs1, vl);
 }

-vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m4_tum(mask, maskedoff, base, vl);
+vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m4_tum(vm, vd, rs1, vl);
 }

-vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m8_tum(mask, maskedoff, base, vl);
+vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m8_tum(vm, vd, rs1, vl);
 }

-vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16mf4_tumu(mask, maskedoff, base, vl);
+vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16mf4_tumu(vm, vd, rs1, vl);
 }

-vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16mf2_tumu(mask, maskedoff, base, vl);
+vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16mf2_tumu(vm, vd, rs1, vl);
 }

-vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m1_tumu(mask, maskedoff, base, vl);
+vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m1_tumu(vm, vd, rs1, vl);
 }

-vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m2_tumu(mask, maskedoff, base, vl);
+vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m2_tumu(vm, vd, rs1, vl);
 }

-vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m4_tumu(mask, maskedoff, base, vl);
+vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m4_tumu(vm, vd, rs1, vl);
 }

-vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m8_tumu(mask, maskedoff, base, vl);
+vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m8_tumu(vm, vd, rs1, vl);
 }

-vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16mf4_tumu(mask, maskedoff, base, vl);
+vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16mf4_tumu(vm, vd, rs1, vl);
 }

-vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16mf2_tumu(mask, maskedoff, base, vl);
+vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16mf2_tumu(vm, vd, rs1, vl);
 }

-vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m1_tumu(mask, maskedoff, base, vl);
+vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m1_tumu(vm, vd, rs1, vl);
 }

-vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m2_tumu(mask, maskedoff, base, vl);
+vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m2_tumu(vm, vd, rs1, vl);
 }

-vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m4_tumu(mask, maskedoff, base, vl);
+vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m4_tumu(vm, vd, rs1, vl);
 }

-vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m8_tumu(mask, maskedoff, base, vl);
+vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m8_tumu(vm, vd, rs1, vl);
 }

-vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16mf4_tumu(mask, maskedoff, base, vl);
+vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16mf4_tumu(vm, vd, rs1, vl);
 }

-vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16mf2_tumu(mask, maskedoff, base, vl);
+vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16mf2_tumu(vm, vd, rs1, vl);
 }

-vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m1_tumu(mask, maskedoff, base, vl);
+vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m1_tumu(vm, vd, rs1, vl);
 }

-vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m2_tumu(mask, maskedoff, base, vl);
+vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m2_tumu(vm, vd, rs1, vl);
 }

-vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m4_tumu(mask, maskedoff, base, vl);
+vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m4_tumu(vm, vd, rs1, vl);
 }

-vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m8_tumu(mask, maskedoff, base, vl);
+vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m8_tumu(vm, vd, rs1, vl);
 }

-vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16mf4_mu(mask, maskedoff, base, vl);
+vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16mf4_mu(vm, vd, rs1, vl);
 }

-vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16mf2_mu(mask, maskedoff, base, vl);
+vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16mf2_mu(vm, vd, rs1, vl);
 }

-vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m1_mu(mask, maskedoff, base, vl);
+vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m1_mu(vm, vd, rs1, vl);
 }

-vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m2_mu(mask, maskedoff, base, vl);
+vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m2_mu(vm, vd, rs1, vl);
 }

-vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m4_mu(mask, maskedoff, base, vl);
+vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m4_mu(vm, vd, rs1, vl);
 }

-vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t vl) {
-  return __riscv_vle16_v_f16m8_mu(mask, maskedoff, base, vl);
+vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_f16m8_mu(vm, vd, rs1, vl);
 }

-vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16mf4_mu(mask, maskedoff, base, vl);
+vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16mf4_mu(vm, vd, rs1, vl);
 }

-vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16mf2_mu(mask, maskedoff, base, vl);
+vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16mf2_mu(vm, vd, rs1, vl);
 }

-vint16m1_t test_vle16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m1_mu(mask, maskedoff, base, vl);
+vint16m1_t test_vle16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m1_mu(vm, vd, rs1, vl);
 }

-vint16m2_t test_vle16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m2_mu(mask, maskedoff, base, vl);
+vint16m2_t test_vle16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m2_mu(vm, vd, rs1, vl);
 }

-vint16m4_t test_vle16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m4_mu(mask, maskedoff, base, vl);
+vint16m4_t test_vle16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m4_mu(vm, vd, rs1, vl);
 }

-vint16m8_t test_vle16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) {
-  return __riscv_vle16_v_i16m8_mu(mask, maskedoff, base, vl);
+vint16m8_t test_vle16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_i16m8_mu(vm, vd, rs1, vl);
 }

-vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16mf4_mu(mask, maskedoff, base, vl);
+vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16mf4_mu(vm, vd, rs1, vl);
 }

-vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16mf2_mu(mask, maskedoff, base, vl);
+vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16mf2_mu(vm, vd, rs1, vl);
 }

-vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m1_mu(mask, maskedoff, base, vl);
+vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m1_mu(vm, vd, rs1, vl);
 }

-vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m2_mu(mask, maskedoff, base, vl);
+vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m2_mu(vm, vd, rs1, vl);
 }

-vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m4_mu(mask, maskedoff, base, vl);
+vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m4_mu(vm, vd, rs1, vl);
 }

-vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_v_u16m8_mu(mask, maskedoff, base, vl);
+vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_v_u16m8_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16\.[ivxfswum.]+\s+} 78 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vle16ff.c
index c61accc94..68e01ffcf 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vle16ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vle16ff.c
@@ -6,292 +6,292 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf4_tu(maskedoff, base, new_vl, vl);
+vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf4_tu(vd, rs1, new_vl, vl);
 }

-vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf2_tu(maskedoff, base, new_vl, vl);
+vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf2_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m1_tu(maskedoff, base, new_vl, vl);
+vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m1_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m2_tu(maskedoff, base, new_vl, vl);
+vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m2_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m4_tu(maskedoff, base, new_vl, vl);
+vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m4_tu(vd, rs1, new_vl, vl);
 }
-vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m8_tu(maskedoff, base, new_vl, vl);
+vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m8_tu(vd, rs1, new_vl, vl);
 }

-vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf4_tu(maskedoff, base, new_vl, vl);
+vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf4_tu(vd, rs1, new_vl, vl);
 }

-vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf2_tu(maskedoff, base, new_vl, vl);
+vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf2_tu(vd, rs1, new_vl, vl);
 }

-vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m1_tu(maskedoff, base, new_vl, vl);
+vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m1_tu(vd, rs1, new_vl, vl);
 }

-vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m2_tu(maskedoff, base, new_vl, vl);
+vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m2_tu(vd, rs1, new_vl, vl);
 }

-vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m4_tu(maskedoff, base, new_vl, vl);
+vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m4_tu(vd, rs1, new_vl, vl);
 }

-vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m8_tu(maskedoff, base, new_vl, vl);
+vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m8_tu(vd, rs1, new_vl, vl);
 }

-vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf4_tu(maskedoff, base, new_vl, vl);
+vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf4_tu(vd, rs1, new_vl, vl);
 }

-vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf2_tu(maskedoff, base, new_vl, vl);
+vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf2_tu(vd, rs1, new_vl, vl);
 }

-vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m1_tu(maskedoff, base, new_vl, vl);
+vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m1_tu(vd, rs1, new_vl, vl);
 }

-vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m2_tu(maskedoff, base, new_vl, vl);
+vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m2_tu(vd, rs1, new_vl, vl);
 }

-vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m4_tu(maskedoff, base, new_vl, vl);
+vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m4_tu(vd, rs1, new_vl, vl);
 }

-vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m8_tu(maskedoff, base, new_vl, vl);
+vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m8_tu(vd, rs1, new_vl, vl);
 }

-vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf4_tum(mask, maskedoff, base, new_vl, vl);
+vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf4_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf2_tum(mask, maskedoff, base, new_vl, vl);
+vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf2_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m1_tum(mask, maskedoff, base, new_vl, vl);
+vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m1_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m2_tum(mask, maskedoff, base, new_vl, vl);
+vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m2_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m4_tum(mask, maskedoff, base, new_vl, vl);
+vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m4_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m8_tum(mask, maskedoff, base, new_vl, vl);
+vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m8_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf4_tum(mask, maskedoff, base, new_vl, vl);
+vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf4_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf2_tum(mask, maskedoff, base, new_vl, vl);
+vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf2_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m1_tum(mask, maskedoff, base, new_vl, vl);
+vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m1_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m2_tum(mask, maskedoff, base, new_vl, vl);
+vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m2_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m4_tum(mask, maskedoff, base, new_vl, vl);
+vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m4_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m8_tum(mask, maskedoff, base, new_vl, vl);
+vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m8_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf4_tum(mask, maskedoff, base, new_vl, vl);
+vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf4_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf2_tum(mask, maskedoff, base, new_vl, vl);
+vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf2_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m1_tum(mask, maskedoff, base, new_vl, vl);
+vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m1_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m2_tum(mask, maskedoff, base, new_vl, vl);
+vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m2_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m4_tum(mask, maskedoff, base, new_vl, vl);
+vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m4_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m8_tum(mask, maskedoff, base, new_vl, vl);
+vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m8_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf4_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf4_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf2_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf2_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m1_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m1_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m2_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m2_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m4_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m4_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m8_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m8_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf4_tumu(mask, maskedoff, base, new_vl, vl);
+vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf4_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf2_tumu(mask, maskedoff, base, new_vl, vl);
+vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf2_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m1_tumu(mask, maskedoff, base, new_vl, vl);
+vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m1_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m2_tumu(mask, maskedoff, base, new_vl, vl);
+vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m2_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m4_tumu(mask, maskedoff, base, new_vl, vl);
+vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m4_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m8_tumu(mask, maskedoff, base, new_vl, vl);
+vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m8_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf4_tumu(mask, maskedoff, base, new_vl, vl);
+vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf4_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf2_tumu(mask, maskedoff, base, new_vl, vl);
+vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf2_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m1_tumu(mask, maskedoff, base, new_vl, vl);
+vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m1_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m2_tumu(mask, maskedoff, base, new_vl, vl);
+vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m2_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m4_tumu(mask, maskedoff, base, new_vl, vl);
+vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m4_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m8_tumu(mask, maskedoff, base, new_vl, vl);
+vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m8_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf4_mu(mask, maskedoff, base, new_vl, vl);
+vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf4_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16mf2_mu(mask, maskedoff, base, new_vl, vl);
+vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16mf2_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m1_mu(mask, maskedoff, base, new_vl, vl);
+vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m1_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m2_mu(mask, maskedoff, base, new_vl, vl);
+vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m2_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m4_mu(mask, maskedoff, base, new_vl, vl);
+vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m4_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_f16m8_mu(mask, maskedoff, base, new_vl, vl);
+vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_f16m8_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf4_mu(mask, maskedoff, base, new_vl, vl);
+vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf4_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16mf2_mu(mask, maskedoff, base, new_vl, vl);
+vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16mf2_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m1_mu(mask, maskedoff, base, new_vl, vl);
+vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m1_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m2_mu(mask, maskedoff, base, new_vl, vl);
+vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m2_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m4_mu(mask, maskedoff, base, new_vl, vl);
+vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m4_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_i16m8_mu(mask, maskedoff, base, new_vl, vl);
+vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_i16m8_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf4_mu(mask, maskedoff, base, new_vl, vl);
+vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf4_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16mf2_mu(mask, maskedoff, base, new_vl, vl);
+vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16mf2_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m1_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m1_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m2_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m2_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m4_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m4_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_v_u16m8_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_u16m8_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16ff\.[ivxfswum.]+\s+} 72 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle32.c b/auto-generated/policy_funcs/gnu-api-tests/vle32.c
index 33498b5a2..907c3ea8e 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vle32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vle32.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32mf2_tu(maskedoff, base, vl);
+vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32mf2_tu(vd, rs1, vl);
 }

-vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m1_tu(maskedoff, base, vl);
+vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m1_tu(vd, rs1, vl);
 }

-vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m2_tu(maskedoff, base, vl);
+vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m2_tu(vd, rs1, vl);
 }

-vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m4_tu(maskedoff, base, vl);
+vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m4_tu(vd, rs1, vl);
 }

-vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m8_tu(maskedoff, base, vl);
+vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m8_tu(vd, rs1, vl);
 }

-vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32mf2_tu(maskedoff, base, vl);
+vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32mf2_tu(vd, rs1, vl);
 }

-vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m1_tu(maskedoff, base, vl);
+vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m1_tu(vd, rs1, vl);
 }

-vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m2_tu(maskedoff, base, vl);
+vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m2_tu(vd, rs1, vl);
 }

-vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m4_tu(maskedoff, base, vl);
+vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m4_tu(vd, rs1, vl);
 }

-vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m8_tu(maskedoff, base, vl);
+vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m8_tu(vd, rs1, vl);
 }

-vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32mf2_tu(maskedoff, base, vl);
+vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32mf2_tu(vd, rs1, vl);
 }

-vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m1_tu(maskedoff, base, vl);
+vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m1_tu(vd, rs1, vl);
 }

-vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m2_tu(maskedoff, base, vl);
+vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m2_tu(vd, rs1, vl);
 }

-vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m4_tu(maskedoff, base, vl);
+vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m4_tu(vd, rs1, vl);
 }

-vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m8_tu(maskedoff, base, vl);
+vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m8_tu(vd, rs1, vl);
 }

-vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32mf2_tum(mask, maskedoff, base, vl);
+vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32mf2_tum(vm, vd, rs1, vl);
 }

-vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m1_tum(mask, maskedoff, base, vl);
+vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m1_tum(vm, vd, rs1, vl);
 }

-vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m2_tum(mask, maskedoff, base, vl);
+vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m2_tum(vm, vd, rs1, vl);
 }

-vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m4_tum(mask, maskedoff, base, vl);
+vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m4_tum(vm, vd, rs1, vl);
 }

-vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m8_tum(mask, maskedoff, base, vl);
+vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m8_tum(vm, vd, rs1, vl);
 }

-vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32mf2_tum(mask, maskedoff, base, vl);
+vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32mf2_tum(vm, vd, rs1, vl);
 }

-vint32m1_t test_vle32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m1_tum(mask, maskedoff, base, vl);
+vint32m1_t test_vle32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m1_tum(vm, vd, rs1, vl);
 }

-vint32m2_t test_vle32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m2_tum(mask, maskedoff, base, vl);
+vint32m2_t test_vle32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m2_tum(vm, vd, rs1, vl);
 }

-vint32m4_t test_vle32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m4_tum(mask, maskedoff, base, vl);
+vint32m4_t test_vle32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m4_tum(vm, vd, rs1, vl);
 }

-vint32m8_t test_vle32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m8_tum(mask, maskedoff, base, vl);
+vint32m8_t test_vle32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m8_tum(vm, vd, rs1, vl);
 }

-vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32mf2_tum(mask, maskedoff, base, vl);
+vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32mf2_tum(vm, vd, rs1, vl);
 }

-vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m1_tum(mask, maskedoff, base, vl);
+vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m1_tum(vm, vd, rs1, vl);
 }

-vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m2_tum(mask, maskedoff, base, vl);
+vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m2_tum(vm, vd, rs1, vl);
 }

-vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m4_tum(mask, maskedoff, base, vl);
+vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m4_tum(vm, vd, rs1, vl);
 }

-vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m8_tum(mask, maskedoff, base, vl);
+vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m8_tum(vm, vd, rs1, vl);
 }

-vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32mf2_tumu(mask, maskedoff, base, vl);
+vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32mf2_tumu(vm, vd, rs1, vl);
 }

-vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m1_tumu(mask, maskedoff, base, vl);
+vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m1_tumu(vm, vd, rs1, vl);
 }

-vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m2_tumu(mask, maskedoff, base, vl);
+vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m2_tumu(vm, vd, rs1, vl);
 }

-vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m4_tumu(mask, maskedoff, base, vl);
+vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m4_tumu(vm, vd, rs1, vl);
 }

-vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m8_tumu(mask, maskedoff, base, vl);
+vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m8_tumu(vm, vd, rs1, vl);
 }

-vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32mf2_tumu(mask, maskedoff, base, vl);
+vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32mf2_tumu(vm, vd, rs1, vl);
 }

-vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m1_tumu(mask, maskedoff, base, vl);
+vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m1_tumu(vm, vd, rs1, vl);
 }

-vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m2_tumu(mask, maskedoff, base, vl);
+vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m2_tumu(vm, vd, rs1, vl);
 }

-vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m4_tumu(mask, maskedoff, base, vl);
+vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m4_tumu(vm, vd, rs1, vl);
 }

-vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m8_tumu(mask, maskedoff, base, vl);
+vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m8_tumu(vm, vd, rs1, vl);
 }

-vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32mf2_tumu(mask, maskedoff, base, vl);
+vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32mf2_tumu(vm, vd, rs1, vl);
 }

-vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m1_tumu(mask, maskedoff, base, vl);
+vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m1_tumu(vm, vd, rs1, vl);
 }

-vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m2_tumu(mask, maskedoff, base, vl);
+vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m2_tumu(vm, vd, rs1, vl);
 }

-vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m4_tumu(mask, maskedoff, base, vl);
+vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m4_tumu(vm, vd, rs1, vl);
 }

-vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m8_tumu(mask, maskedoff, base, vl);
+vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m8_tumu(vm, vd, rs1, vl);
 }

-vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32mf2_mu(mask, maskedoff, base, vl);
+vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32mf2_mu(vm, vd, rs1, vl);
 }

-vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m1_mu(mask, maskedoff, base, vl);
+vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m1_mu(vm, vd, rs1, vl);
 }

-vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m2_mu(mask, maskedoff, base, vl);
+vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m2_mu(vm, vd, rs1, vl);
 }

-vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m4_mu(mask, maskedoff, base, vl);
+vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m4_mu(vm, vd, rs1, vl);
 }

-vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_v_f32m8_mu(mask, maskedoff, base, vl);
+vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_f32m8_mu(vm, vd, rs1, vl);
 }

-vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32mf2_mu(mask, maskedoff, base, vl);
+vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32mf2_mu(vm, vd, rs1, vl);
 }

-vint32m1_t test_vle32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m1_mu(mask, maskedoff, base, vl);
+vint32m1_t test_vle32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m1_mu(vm, vd, rs1, vl);
 }

-vint32m2_t test_vle32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m2_mu(mask, maskedoff, base, vl);
+vint32m2_t test_vle32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m2_mu(vm, vd, rs1, vl);
 }

-vint32m4_t test_vle32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m4_mu(mask, maskedoff, base, vl);
+vint32m4_t test_vle32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m4_mu(vm, vd, rs1, vl);
 }

-vint32m8_t test_vle32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_v_i32m8_mu(mask, maskedoff, base, vl);
+vint32m8_t test_vle32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_i32m8_mu(vm, vd, rs1, vl);
 }

-vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32mf2_mu(mask, maskedoff, base, vl);
+vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32mf2_mu(vm, vd, rs1, vl);
 }

-vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m1_mu(mask, maskedoff, base, vl);
+vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m1_mu(vm, vd, rs1, vl);
 }

-vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m2_mu(mask, maskedoff, base, vl);
+vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m2_mu(vm, vd, rs1, vl);
 }

-vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m4_mu(mask, maskedoff, base, vl);
+vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m4_mu(vm, vd, rs1, vl);
 }

-vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_v_u32m8_mu(mask, maskedoff, base, vl);
+vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_v_u32m8_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32\.[ivxfswum.]+\s+} 63 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vle32ff.c
index ff59be938..088cf39cd 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vle32ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vle32ff.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_f32mf2_tu(maskedoff, base, new_vl, vl);
+vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_f32mf2_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_f32m1_tu(maskedoff, base, new_vl, vl);
+vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_f32m1_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_f32m2_tu(maskedoff, base, new_vl, vl);
+vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_f32m2_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_f32m4_tu(maskedoff, base, new_vl, vl);
+vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_f32m4_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_f32m8_tu(maskedoff, base, new_vl, vl);
+vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_f32m8_tu(vd, rs1, new_vl, vl);
 }

-vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_i32mf2_tu(maskedoff, base, new_vl, vl);
+vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_i32mf2_tu(vd, rs1, new_vl, vl);
 }

-vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_i32m1_tu(maskedoff, base, new_vl, vl);
+vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_i32m1_tu(vd, rs1, new_vl, vl);
 }

-vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_i32m2_tu(maskedoff, base, new_vl, vl);
+vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_i32m2_tu(vd, rs1, new_vl, vl);
 }

-vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_i32m4_tu(maskedoff, base, new_vl, vl);
+vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_i32m4_tu(vd, rs1, new_vl, vl);
 }

-vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_i32m8_tu(maskedoff, base, new_vl, vl);
+vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_i32m8_tu(vd, rs1, new_vl, vl);
 }

-vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_u32mf2_tu(maskedoff, base, new_vl, vl);
+vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_u32mf2_tu(vd, rs1, new_vl, vl);
 }

-vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_u32m1_tu(maskedoff, base, new_vl, vl);
+vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_u32m1_tu(vd, rs1, new_vl, vl);
 }

-vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_u32m2_tu(maskedoff, base,
new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m2_tu(vd, rs1, new_vl, vl); } -vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m4_tu(maskedoff, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m4_tu(vd, rs1, new_vl, vl); } -vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m8_tu(maskedoff, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m8_tu(vd, rs1, new_vl, vl); } -vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32mf2_tum(mask, maskedoff, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32mf2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m1_tum(mask, maskedoff, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m1_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m2_tum(mask, maskedoff, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m4_tum(mask, maskedoff, base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m4_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m8_tum(mask, maskedoff, base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m8_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32mf2_tum(mask, maskedoff, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32mf2_tum(vm, vd, rs1, new_vl, vl); } -vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m1_tum(mask, maskedoff, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m1_tum(vm, vd, 
rs1, new_vl, vl); } -vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m2_tum(mask, maskedoff, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m2_tum(vm, vd, rs1, new_vl, vl); } -vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m4_tum(mask, maskedoff, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m4_tum(vm, vd, rs1, new_vl, vl); } -vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m8_tum(mask, maskedoff, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m8_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32mf2_tum(mask, maskedoff, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32mf2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m1_tum(mask, maskedoff, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m1_tum(vm, vd, rs1, new_vl, vl); } -vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m2_tum(mask, maskedoff, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m4_tum(mask, maskedoff, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m4_tum(vm, vd, rs1, new_vl, vl); } -vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m8_tum(mask, maskedoff, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m8_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32mf2_tumu(mask, maskedoff, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32mf2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t 
maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m1_tumu(mask, maskedoff, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m1_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m2_tumu(mask, maskedoff, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m4_tumu(mask, maskedoff, base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m8_tumu(mask, maskedoff, base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m8_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32mf2_tumu(mask, maskedoff, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32mf2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m1_tumu(mask, maskedoff, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m1_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m2_tumu(mask, maskedoff, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m4_tumu(mask, maskedoff, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m4_tumu(vm, vd, rs1, new_vl, vl); } -vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m8_tumu(mask, maskedoff, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m8_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - 
return __riscv_vle32ff_v_u32mf2_tumu(mask, maskedoff, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32mf2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m1_tumu(mask, maskedoff, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m1_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m2_tumu(mask, maskedoff, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m4_tumu(mask, maskedoff, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m4_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m8_tumu(mask, maskedoff, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32mf2_mu(mask, maskedoff, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32mf2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m1_mu(mask, maskedoff, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m1_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m2_mu(mask, maskedoff, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m4_mu(mask, maskedoff, base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m4_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_f32m8_mu(mask, maskedoff, base, 
new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_f32m8_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32mf2_mu(mask, maskedoff, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32mf2_mu(vm, vd, rs1, new_vl, vl); } -vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m1_mu(mask, maskedoff, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m1_mu(vm, vd, rs1, new_vl, vl); } -vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m2_mu(mask, maskedoff, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m2_mu(vm, vd, rs1, new_vl, vl); } -vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m4_mu(mask, maskedoff, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m4_mu(vm, vd, rs1, new_vl, vl); } -vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_i32m8_mu(mask, maskedoff, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_i32m8_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32mf2_mu(mask, maskedoff, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32mf2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m1_mu(mask, maskedoff, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m1_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m2_mu(mask, maskedoff, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle32ff_v_u32m2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle32ff_v_u32m4_mu(mask, maskedoff, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vle32ff_v_u32m4_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_v_u32m8_mu(mask, maskedoff, base, new_vl, vl);
+vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_v_u32m8_mu(vm, vd, rs1, new_vl, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32ff\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle64.c b/auto-generated/policy_funcs/gnu-api-tests/vle64.c
index ee7e2df78..001e4fbeb 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vle64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vle64.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_v_f64m1_tu(maskedoff, base, vl);
+vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_f64m1_tu(vd, rs1, vl);
 }

-vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_v_f64m2_tu(maskedoff, base, vl);
+vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_f64m2_tu(vd, rs1, vl);
 }

-vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_v_f64m4_tu(maskedoff, base, vl);
+vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_f64m4_tu(vd, rs1, vl);
 }

-vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_v_f64m8_tu(maskedoff, base, vl);
+vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_f64m8_tu(vd, rs1, vl);
 }

-vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_v_i64m1_tu(maskedoff, base, vl);
+vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_i64m1_tu(vd, rs1, vl);
 }

-vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_v_i64m2_tu(maskedoff, base, vl);
+vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_i64m2_tu(vd, rs1, vl);
 }

-vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_v_i64m4_tu(maskedoff, base, vl);
+vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_i64m4_tu(vd, rs1, vl);
 }

-vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_v_i64m8_tu(maskedoff, base, vl);
+vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_i64m8_tu(vd, rs1, vl);
 }

-vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_v_u64m1_tu(maskedoff, base, vl);
+vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_u64m1_tu(vd, rs1, vl);
 }

-vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t 
maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m2_tu(maskedoff, base, vl); +vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m2_tu(vd, rs1, vl); } -vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m4_tu(maskedoff, base, vl); +vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m4_tu(vd, rs1, vl); } -vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m8_tu(maskedoff, base, vl); +vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m8_tu(vd, rs1, vl); } -vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m1_tum(mask, maskedoff, base, vl); +vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m1_tum(vm, vd, rs1, vl); } -vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m2_tum(mask, maskedoff, base, vl); +vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m2_tum(vm, vd, rs1, vl); } -vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m4_tum(mask, maskedoff, base, vl); +vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m4_tum(vm, vd, rs1, vl); } -vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m8_tum(mask, maskedoff, base, vl); +vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m8_tum(vm, vd, rs1, vl); } -vint64m1_t test_vle64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m1_tum(mask, maskedoff, base, vl); +vint64m1_t test_vle64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m1_tum(vm, vd, rs1, vl); } -vint64m2_t test_vle64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m2_tum(mask, maskedoff, base, vl); +vint64m2_t test_vle64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m2_tum(vm, vd, rs1, vl); } -vint64m4_t test_vle64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m4_tum(mask, maskedoff, base, vl); +vint64m4_t test_vle64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m4_tum(vm, vd, rs1, vl); } -vint64m8_t test_vle64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m8_tum(mask, maskedoff, base, vl); +vint64m8_t test_vle64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m8_tum(vm, vd, rs1, vl); } -vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return 
__riscv_vle64_v_u64m1_tum(mask, maskedoff, base, vl); +vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m1_tum(vm, vd, rs1, vl); } -vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m2_tum(mask, maskedoff, base, vl); +vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m2_tum(vm, vd, rs1, vl); } -vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m4_tum(mask, maskedoff, base, vl); +vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m4_tum(vm, vd, rs1, vl); } -vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m8_tum(mask, maskedoff, base, vl); +vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m8_tum(vm, vd, rs1, vl); } -vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m1_tumu(mask, maskedoff, base, vl); +vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m1_tumu(vm, vd, rs1, vl); } -vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m2_tumu(mask, maskedoff, base, vl); +vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m2_tumu(vm, vd, rs1, vl); } -vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m4_tumu(mask, maskedoff, base, vl); +vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m4_tumu(vm, vd, rs1, vl); } -vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m8_tumu(mask, maskedoff, base, vl); +vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m8_tumu(vm, vd, rs1, vl); } -vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m1_tumu(mask, maskedoff, base, vl); +vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m1_tumu(vm, vd, rs1, vl); } -vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m2_tumu(mask, maskedoff, base, vl); +vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m2_tumu(vm, vd, rs1, vl); } -vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m4_tumu(mask, maskedoff, base, vl); +vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m4_tumu(vm, vd, rs1, vl); } -vint64m8_t 
test_vle64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m8_tumu(mask, maskedoff, base, vl); +vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m8_tumu(vm, vd, rs1, vl); } -vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m1_tumu(mask, maskedoff, base, vl); +vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m1_tumu(vm, vd, rs1, vl); } -vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m2_tumu(mask, maskedoff, base, vl); +vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m2_tumu(vm, vd, rs1, vl); } -vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m4_tumu(mask, maskedoff, base, vl); +vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m4_tumu(vm, vd, rs1, vl); } -vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return __riscv_vle64_v_u64m8_tumu(mask, maskedoff, base, vl); +vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vle64_v_u64m8_tumu(vm, vd, rs1, vl); } -vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m1_mu(mask, maskedoff, base, vl); +vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m1_mu(vm, vd, rs1, vl); } -vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m2_mu(mask, maskedoff, base, vl); +vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m2_mu(vm, vd, rs1, vl); } -vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m4_mu(mask, maskedoff, base, vl); +vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m4_mu(vm, vd, rs1, vl); } -vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t vl) { - return __riscv_vle64_v_f64m8_mu(mask, maskedoff, base, vl); +vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vle64_v_f64m8_mu(vm, vd, rs1, vl); } -vint64m1_t test_vle64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m1_mu(mask, maskedoff, base, vl); +vint64m1_t test_vle64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vle64_v_i64m1_mu(vm, vd, rs1, vl); } -vint64m2_t test_vle64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return __riscv_vle64_v_i64m2_mu(mask, maskedoff, base, vl); +vint64m2_t test_vle64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) { + return 
__riscv_vle64_v_i64m2_mu(vm, vd, rs1, vl);
 }

-vint64m4_t test_vle64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_v_i64m4_mu(mask, maskedoff, base, vl);
+vint64m4_t test_vle64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_i64m4_mu(vm, vd, rs1, vl);
 }

-vint64m8_t test_vle64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_v_i64m8_mu(mask, maskedoff, base, vl);
+vint64m8_t test_vle64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_i64m8_mu(vm, vd, rs1, vl);
 }

-vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_v_u64m1_mu(mask, maskedoff, base, vl);
+vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_u64m1_mu(vm, vd, rs1, vl);
 }

-vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_v_u64m2_mu(mask, maskedoff, base, vl);
+vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_u64m2_mu(vm, vd, rs1, vl);
 }

-vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_v_u64m4_mu(mask, maskedoff, base, vl);
+vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_u64m4_mu(vm, vd, rs1, vl);
 }

-vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_v_u64m8_mu(mask, maskedoff, base, vl);
+vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_v_u64m8_mu(vm, vd, rs1, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vle64ff.c
index d712544e7..f6c7b35b6 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vle64ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vle64ff.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_f64m1_tu(maskedoff, base, new_vl, vl);
+vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_f64m1_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_f64m2_tu(maskedoff, base, new_vl, vl);
+vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_f64m2_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_f64m4_tu(maskedoff, base, new_vl, vl);
+vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_f64m4_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m8_t 
test_vle64ff_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m8_tu(maskedoff, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m8_tu(vd, rs1, new_vl, vl); } -vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m1_tu(maskedoff, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m1_tu(vd, rs1, new_vl, vl); } -vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m2_tu(maskedoff, base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m2_tu(vd, rs1, new_vl, vl); } -vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m4_tu(maskedoff, base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m4_tu(vd, rs1, new_vl, vl); } -vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m8_tu(maskedoff, base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m8_tu(vd, rs1, new_vl, vl); } -vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m1_tu(maskedoff, base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m1_tu(vd, rs1, new_vl, vl); } -vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m2_tu(maskedoff, base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m2_tu(vd, rs1, new_vl, vl); } -vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m4_tu(maskedoff, base, new_vl, vl); +vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m4_tu(vd, rs1, new_vl, vl); } -vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m8_tu(maskedoff, base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m8_tu(vd, rs1, new_vl, vl); } -vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m1_tum(mask, maskedoff, base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m1_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - 
return __riscv_vle64ff_v_f64m2_tum(mask, maskedoff, base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m2_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m4_tum(mask, maskedoff, base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m4_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m8_tum(mask, maskedoff, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m8_tum(vm, vd, rs1, new_vl, vl); } -vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m1_tum(mask, maskedoff, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m1_tum(vm, vd, rs1, new_vl, vl); } -vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m2_tum(mask, maskedoff, base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m2_tum(vm, vd, rs1, new_vl, vl); } -vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m4_tum(mask, maskedoff, base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m4_tum(vm, vd, rs1, new_vl, vl); } -vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m8_tum(mask, maskedoff, base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m8_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m1_tum(mask, maskedoff, base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m1_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m2_tum(mask, maskedoff, base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m2_tum(vm, vd, rs1, new_vl, vl); } -vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m4_tum(mask, maskedoff, base, new_vl, vl); +vuint64m4_t 
test_vle64ff_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m4_tum(vm, vd, rs1, new_vl, vl); } -vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m8_tum(mask, maskedoff, base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m8_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m1_tumu(mask, maskedoff, base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m1_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m2_tumu(mask, maskedoff, base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m4_tumu(mask, maskedoff, base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m8_tumu(mask, maskedoff, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m8_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m1_tumu(mask, maskedoff, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m1_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m2_tumu(mask, maskedoff, base, new_vl, vl); +vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m2_tumu(vm, vd, rs1, new_vl, vl); } -vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m4_tumu(mask, maskedoff, base, new_vl, vl); +vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m4_tumu(vm, vd, rs1, new_vl, vl); } -vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m8_tumu(mask, maskedoff, base, new_vl, vl); +vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t 
*rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m8_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m1_tumu(mask, maskedoff, base, new_vl, vl); +vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m1_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m2_tumu(mask, maskedoff, base, new_vl, vl); +vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m2_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m4_tumu(mask, maskedoff, base, new_vl, vl); +vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m4_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_u64m8_tumu(mask, maskedoff, base, new_vl, vl); +vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_u64m8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m1_mu(mask, maskedoff, base, new_vl, vl); +vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m1_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m2_mu(mask, maskedoff, base, new_vl, vl); +vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m2_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m4_mu(mask, maskedoff, base, new_vl, vl); +vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m4_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_f64m8_mu(mask, maskedoff, base, new_vl, vl); +vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_f64m8_mu(vm, vd, rs1, new_vl, vl); } -vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle64ff_v_i64m1_mu(mask, maskedoff, base, new_vl, vl); +vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle64ff_v_i64m1_mu(vm, vd, rs1, 
new_vl, vl);
 }

-vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_i64m2_mu(mask, maskedoff, base, new_vl, vl);
+vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_i64m2_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_i64m4_mu(mask, maskedoff, base, new_vl, vl);
+vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_i64m4_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_i64m8_mu(mask, maskedoff, base, new_vl, vl);
+vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_i64m8_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_u64m1_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_u64m1_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_u64m2_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_u64m2_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_u64m4_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_u64m4_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_v_u64m8_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_v_u64m8_mu(vm, vd, rs1, new_vl, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64ff\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle8.c b/auto-generated/policy_funcs/gnu-api-tests/vle8.c
index cf377aacd..92028b81a 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vle8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vle8.c
@@ -6,228 +6,228 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_v_i8mf8_tu(maskedoff, base, vl);
+vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_v_i8mf8_tu(vd, rs1, vl);
 }

-vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
-  return 
__riscv_vle8_v_i8mf4_tu(maskedoff, base, vl); +vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf4_tu(vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf2_tu(maskedoff, base, vl); +vint8mf2_t test_vle8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf2_tu(vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m1_tu(maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m1_tu(vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m2_tu(maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m2_tu(vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m4_tu(maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m4_tu(vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m8_tu(maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m8_tu(vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf8_tu(maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf8_tu(vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf4_tu(maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf4_tu(vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf2_tu(maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf2_tu(vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m1_tu(maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m1_tu(vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m2_tu(maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m2_tu(vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m4_tu(maskedoff, base, vl); +vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m4_tu(vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m8_tu(maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m8_tu(vd, rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return 
__riscv_vle8_v_i8mf8_tum(mask, maskedoff, base, vl); +vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf8_tum(vm, vd, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf4_tum(mask, maskedoff, base, vl); +vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf4_tum(vm, vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf2_tum(mask, maskedoff, base, vl); +vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf2_tum(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m1_tum(mask, maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m1_tum(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m2_tum(mask, maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m2_tum(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m4_tum(mask, maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m4_tum(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m8_tum(mask, maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m8_tum(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf8_tum(mask, maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf8_tum(vm, vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf4_tum(mask, maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf4_tum(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf2_tum(mask, maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf2_tum(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m1_tum(mask, maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m1_tum(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m2_tum(mask, maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t vm, 
vuint8m2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m2_tum(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m4_tum(mask, maskedoff, base, vl); +vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m4_tum(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m8_tum(mask, maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m8_tum(vm, vd, rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf8_tumu(mask, maskedoff, base, vl); +vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf8_tumu(vm, vd, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf4_tumu(mask, maskedoff, base, vl); +vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf4_tumu(vm, vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf2_tumu(mask, maskedoff, base, vl); +vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf2_tumu(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m1_tumu(mask, maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m1_tumu(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m2_tumu(mask, maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m2_tumu(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m4_tumu(mask, maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m4_tumu(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m8_tumu(mask, maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m8_tumu(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf8_tumu(mask, maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf8_tumu(vm, vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf4_tumu(mask, maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { + return 
__riscv_vle8_v_u8mf4_tumu(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf2_tumu(mask, maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf2_tumu(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m1_tumu(mask, maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m1_tumu(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m2_tumu(mask, maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m2_tumu(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m4_tumu(mask, maskedoff, base, vl); +vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m4_tumu(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m8_tumu(mask, maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m8_tumu(vm, vd, rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf8_mu(mask, maskedoff, base, vl); +vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf8_mu(vm, vd, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf4_mu(mask, maskedoff, base, vl); +vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf4_mu(vm, vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8mf2_mu(mask, maskedoff, base, vl); +vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8mf2_mu(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m1_mu(mask, maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m1_mu(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m2_mu(mask, maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m2_mu(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m4_mu(mask, maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m4_mu(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const 
int8_t *base, size_t vl) { - return __riscv_vle8_v_i8m8_mu(mask, maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_v_i8m8_mu(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf8_mu(mask, maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf8_mu(vm, vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf4_mu(mask, maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf4_mu(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8mf2_mu(mask, maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8mf2_mu(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m1_mu(mask, maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m1_mu(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m2_mu(mask, maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m2_mu(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m4_mu(mask, maskedoff, base, vl); +vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m4_mu(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_v_u8m8_mu(mask, maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_v_u8m8_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8\.[ivxfswum.]+\s+} 62 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vle8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vle8ff.c index e51296b88..b117c5363 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vle8ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vle8ff.c @@ -6,228 +6,228 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf8_tu(maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf8_tu(vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf4_tu(maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vle8ff_v_i8mf4_tu(vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf2_tu(maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf2_tu(vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m1_tu(maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m1_tu(vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m2_tu(maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m2_tu(vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m4_tu(maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m4_tu(vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m8_tu(maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m8_tu(vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf8_tu(maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf8_tu(vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf4_tu(maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf4_tu(vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf2_tu(maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf2_tu(vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m1_tu(maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m1_tu(vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m2_tu(maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m2_tu(vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m4_tu(maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, 
size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m4_tu(vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m8_tu(maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m8_tu(vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf8_tum(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf8_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf4_tum(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf2_tum(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf2_tum(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m1_tum(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m1_tum(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m2_tum(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m2_tum(vm, vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m4_tum(mask, maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m4_tum(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m8_tum(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m8_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf8_tum(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf8_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf4_tum(mask, maskedoff, base, new_vl, vl); 
+vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf2_tum(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m1_tum(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m1_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m2_tum(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m4_tum(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m4_tum(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m8_tum(mask, maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m8_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf8_tumu(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf8_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf4_tumu(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf2_tumu(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m1_tumu(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m1_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2_t 
test_vle8ff_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m2_tumu(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m4_tumu(mask, maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m4_tumu(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m8_tumu(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf8_tumu(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf4_tumu(mask, maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf2_tumu(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m1_tumu(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m1_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m2_tumu(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m4_tumu(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m8_tumu(mask, maskedoff, base, new_vl, vl); 
+vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m8_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf8_mu(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf8_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf4_mu(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf4_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8mf2_mu(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8mf2_mu(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m1_mu(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m1_mu(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m2_mu(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m2_mu(vm, vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m4_mu(mask, maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m4_mu(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_i8m8_mu(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_i8m8_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf8_mu(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf8_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf4_mu(mask, maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf4_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8mf2_mu(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8mf2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m1_mu(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m1_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m2_mu(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m4_mu(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m4_mu(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_v_u8m8_mu(mask, maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_v_u8m8_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8ff\.[ivxfswum.]+\s+} 56 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxei16.c index c463b134b..b0ad839ad 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxei16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxei16.c @@ -6,916 +6,916 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf4_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf2_tu(maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m1_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m2_tu(maskedoff, base, bindex, vl); +vfloat16m2_t 
test_vloxei16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m4_tu(maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m8_tu(maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32mf2_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m1_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m2_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m4_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m8_tu(maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m1_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m2_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m4_tu(maskedoff, base, bindex, vl); +vfloat64m4_t 
test_vloxei16_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m8_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf8_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf4_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf2_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m1_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m2_tu(maskedoff, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m2_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m4_tu(maskedoff, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m4_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf4_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf2_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m1_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m1_tu(vd, rs1, rs2, vl); } 
-vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m2_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m4_tu(maskedoff, base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m8_tu(maskedoff, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32mf2_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m1_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m2_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m4_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m8_tu(maskedoff, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m1_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m2_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vloxei16_v_i64m4_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m8_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf8_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf4_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf2_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m1_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m2_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m4_tu(maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf4_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf2_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m1_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t vd, 
const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m2_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m4_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m8_tu(maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32mf2_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m1_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m2_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m4_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m8_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m1_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m2_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vloxei16_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m4_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m8_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m1_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m2_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m4_tum(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m8_tum(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t 
test_vloxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m1_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m2_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m4_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m1_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m2_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m4_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t 
test_vloxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m1_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m2_tum(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m4_tum(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m1_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m2_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - 
return __riscv_vloxei16_v_i16m4_tum(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m8_tum(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m1_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m2_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m4_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m8_tum(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m1_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m2_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m4_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t 
test_vloxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m8_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m1_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m2_tum(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m4_tum(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t 
*rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m1_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m2_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m4_tum(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m8_tum(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m1_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m2_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m4_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m8_tum(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + 
return __riscv_vloxei16_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m1_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m2_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m4_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, 
vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t 
test_vloxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const 
int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return 
__riscv_vloxei16_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } 
-vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m1_tumu(vm, vd, rs1, rs2, 
vl); } -vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16mf2_mu(vm, vd, rs1, 
rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m1_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m2_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m4_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_f16m8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m1_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m2_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m4_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f32m8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f32m8_mu(vm, vd, rs1, rs2, vl); } 
-vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m1_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m2_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m4_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_f64m8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m1_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i8m2_mu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, 
size_t vl) { - return __riscv_vloxei16_v_i8m4_mu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m1_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m2_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m4_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_i16m8_mu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m1_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m2_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t 
test_vloxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m4_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i32m8_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m1_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m2_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m4_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_i64m8_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxei16_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m1_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m2_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u8m4_mu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m1_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m2_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m4_mu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_v_u16m8_mu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_u16m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t 
mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m1_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m2_mu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m4_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u32m8_mu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m1_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m2_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m4_mu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_v_u64m8_mu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_u64m8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei16\.[ivxfswum.]+\s+} 228 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxei32.c index fc8785324..bb04b0490 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxei32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxei32.c @@ -6,836 +6,836 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf4_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf2_tu(maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m1_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m2_tu(maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m4_tu(maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32mf2_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m1_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m2_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m4_tu(maskedoff, base, bindex, vl); +vfloat32m4_t 
test_vloxei32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m8_tu(maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m1_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m2_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m4_tu(maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m8_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf8_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf4_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf2_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m1_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m2_tu(maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return 
__riscv_vloxei32_v_i8m2_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf4_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf2_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m1_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m2_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m4_tu(maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m4_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32mf2_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m1_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m2_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m4_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m8_tu(maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t 
*base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m1_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m2_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m4_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m8_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf8_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf4_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf2_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m1_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m2_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf4_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf2_tu(maskedoff, base, bindex, vl); +vuint16mf2_t 
test_vloxei32_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m1_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m2_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m4_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32mf2_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m1_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m2_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m4_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m8_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m1_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m2_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, 
size_t vl) { + return __riscv_vloxei32_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m4_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m8_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m1_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m2_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m4_tum(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m1_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m1_tum(vm, vd, rs1, rs2, 
vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m2_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m4_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m1_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m2_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m4_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t 
test_vloxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m1_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m2_tum(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m1_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m2_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m4_tum(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t 
bindex, size_t vl) { - return __riscv_vloxei32_v_i32m1_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m2_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m4_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m8_tum(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m1_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m2_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m4_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m8_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); 
+vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m1_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m2_tum(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m1_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m2_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m4_tum(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t 
vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m1_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m2_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m4_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m8_tum(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m1_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m2_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m4_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const 
float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t 
test_vloxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t 
vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, 
vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return 
__riscv_vloxei32_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + 
return __riscv_vloxei32_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m1_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { 
+ return __riscv_vloxei32_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m2_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f16m4_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m1_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m2_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m4_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_f32m8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m1_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m2_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vloxei32_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m4_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_f64m8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m1_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i8m2_mu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t 
maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m1_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m2_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i16m4_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m1_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m2_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m4_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_i32m8_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m1_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m2_mu(mask, maskedoff, base, 
bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m4_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_i64m8_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m1_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_v_u8m2_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, 
vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16m1_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16m2_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u16m4_mu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u16m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m1_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m2_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m4_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u32m8_mu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u32m8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m1_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m2_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m4_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_v_u64m8_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_v_u64m8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei32\.[ivxfswum.]+\s+} 208 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxei64.c
index f10e81ce1..681ea3413 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxei64.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16mf4_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16mf4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16mf2_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16mf2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16m1_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16m1_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16m2_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16m2_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32mf2_tu(maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32mf2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const
float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m1_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m2_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m4_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m1_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m2_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m4_tu(maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m8_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf8_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf4_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf2_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return 
__riscv_vloxei64_v_i8m1_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8m1_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf4_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf2_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m1_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m2_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m2_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32mf2_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m1_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m2_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m4_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m4_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m1_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m2_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, 
vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m4_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m8_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf8_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf4_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf2_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8m1_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf4_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf2_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m1_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m2_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint32mf2_t 
test_vloxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32mf2_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m1_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m2_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m4_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m1_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m2_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m4_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m8_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16mf2_tum(vm, vd, 
rs1, rs2, vl); } -vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16m1_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16m2_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m1_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m2_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f32m4_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m1_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m2_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m4_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vloxei64_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_f64m8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i8m1_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m1_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i16m2_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t 
test_vloxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m1_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m2_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i32m4_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m1_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m2_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m4_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_i64m8_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, 
vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u8m1_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m1_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u16m2_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m1_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - 
return __riscv_vloxei64_v_u32m2_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u32m4_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m1_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m2_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m4_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxei64_v_u64m8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxei64_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxei64_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxei64_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - 
return __riscv_vloxei64_v_f16m2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m1_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m4_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m1_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m4_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8mf8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8m1_tumu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16m1_tumu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16m2_tumu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32m1_tumu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32m2_tumu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32m4_tumu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m1_tumu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m2_tumu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m4_tumu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m8_tumu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8mf8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8m1_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16m1_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16m2_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m1_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m2_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m4_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m1_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m2_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m4_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m8_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16mf4_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16mf2_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16m1_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f16m2_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32mf2_mu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m1_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m2_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f32m4_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m1_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m2_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m4_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_f64m8_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_f64m8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8mf8_mu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8mf8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8mf4_mu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8mf2_mu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i8m1_mu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i8m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16mf4_mu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16mf2_mu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16m1_mu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i16m2_mu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32mf2_mu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32m1_mu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32m2_mu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i32m4_mu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m1_mu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m2_mu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m4_mu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_i64m8_mu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_i64m8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8mf8_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8mf8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8mf4_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u8m1_mu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u8m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16mf4_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16m1_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u16m2_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m1_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m2_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u32m4_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m1_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m2_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m4_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxei64_v_u64m8_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxei64_v_u64m8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei64\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxei8.c
index 6de8b052f..2f4708320 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxei8.c
@@ -6,948 +6,948 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16mf4_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16mf4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16mf2_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16mf2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m1_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m1_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m2_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m4_tu(maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m8_tu(maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m8_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32mf2_tu(maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32mf2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m1_tu(maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m1_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m2_tu(maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m4_tu(maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m8_tu(maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m1_tu(maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m1_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m2_tu(maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m4_tu(maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m8_tu(maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m8_tu(vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8mf8_tu(maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8mf8_tu(vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8mf4_tu(maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8mf4_tu(vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8mf2_tu(maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8mf2_tu(vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m1_tu(maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m1_tu(vd, rs1, rs2, vl);
 }
-vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m2_tu(maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m2_tu(vd, rs1, rs2, vl);
 }
-vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m4_tu(maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m4_tu(vd, rs1, rs2, vl);
 }
-vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m8_tu(maskedoff, base, bindex, vl);
+vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m8_tu(vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16mf4_tu(maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16mf4_tu(vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16mf2_tu(maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16mf2_tu(vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m1_tu(maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m1_tu(vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m2_tu(maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m2_tu(vd, rs1, rs2, vl);
 }
-vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m4_tu(maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m4_tu(vd, rs1, rs2, vl);
 }
-vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m8_tu(maskedoff, base, bindex, vl);
+vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m8_tu(vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32mf2_tu(maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32mf2_tu(vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m1_tu(maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m1_tu(vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m2_tu(maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m2_tu(vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m4_tu(maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m4_tu(vd, rs1, rs2, vl);
 }
-vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m8_tu(maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m8_tu(vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m1_tu(maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m1_tu(vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m2_tu(maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m2_tu(vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m4_tu(maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m4_tu(vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m8_tu(maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8mf8_tu(maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8mf8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8mf4_tu(maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8mf4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8mf2_tu(maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8mf2_tu(vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m1_tu(maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m1_tu(vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m2_tu(maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m2_tu(vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m4_tu(maskedoff, base, bindex, vl);
+vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m4_tu(vd, rs1, rs2, vl);
 }
-vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m8_tu(maskedoff, base, bindex, vl);
+vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16mf4_tu(maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16mf4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16mf2_tu(maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16mf2_tu(vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m1_tu(maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m1_tu(vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m2_tu(maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m2_tu(vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m4_tu(maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m4_tu(vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m8_tu(maskedoff, base, bindex, vl);
+vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m8_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32mf2_tu(maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32mf2_tu(vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m1_tu(maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32m1_tu(vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m2_tu(maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32m2_tu(vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m4_tu(maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32m4_tu(vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m8_tu(maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32m8_tu(vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u64m1_tu(maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u64m1_tu(vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u64m2_tu(maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u64m2_tu(vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u64m4_tu(maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u64m4_tu(vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u64m8_tu(maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u64m8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16mf4_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16mf2_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m1_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m1_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m2_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m4_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f16m8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f16m8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32mf2_tum(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m1_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m1_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m2_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m4_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f32m8_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f32m8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m1_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m1_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m2_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m4_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_f64m8_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_f64m8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8mf8_tum(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8mf8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8mf4_tum(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8mf2_tum(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m1_tum(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m2_tum(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m4_tum(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i8m8_tum(mask, maskedoff, base, bindex, vl);
+vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i8m8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16mf4_tum(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16mf2_tum(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m1_tum(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m2_tum(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m4_tum(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i16m8_tum(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i16m8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32mf2_tum(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m1_tum(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m2_tum(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m4_tum(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i32m8_tum(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i32m8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m1_tum(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m2_tum(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m4_tum(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_i64m8_tum(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_i64m8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8mf8_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8mf8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8mf4_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8mf2_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m1_tum(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m1_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m2_tum(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m4_tum(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u8m8_tum(mask, maskedoff, base, bindex, vl);
+vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u8m8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16mf4_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16mf2_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m1_tum(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m1_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m2_tum(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m4_tum(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u16m8_tum(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u16m8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32mf2_tum(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m1_tum(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32m1_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m2_tum(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32m2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m4_tum(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u32m4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u32m8_tum(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t
rs2, size_t vl) { + return __riscv_vloxei8_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m1_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m2_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m4_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return 
__riscv_vloxei8_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + 
return __riscv_vloxei8_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m8_tumu(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, 
const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m8_tumu(mask, 
maskedoff, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const 
uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vloxei8_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16mf4_mu(vm, vd, rs1, rs2, 
vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m1_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m2_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m4_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f16m8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m1_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m2_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m4_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t mask, 
vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f32m8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m1_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m2_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m4_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_f64m8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m1_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m2_mu(mask, maskedoff, base, bindex, 
vl); +vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m4_mu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i8m8_mu(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i8m8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m1_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m2_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m4_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i16m8_mu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t 
test_vloxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m1_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m2_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m4_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i32m8_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m1_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m2_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m4_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_i64m8_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf4_mu(mask, maskedoff, base, 
bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m1_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m2_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m4_mu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u8m8_mu(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u8m8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m1_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m2_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return 
__riscv_vloxei8_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m4_mu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u16m8_mu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u16m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m1_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m2_mu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m4_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_v_u32m8_mu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m1_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_v_u64m2_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t 
maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u64m4_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxei8_v_u64m8_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxei8_v_u64m8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei8\.[ivxfswum.]+\s+} 236 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei16.c
index d68d0bbea..e33d81c26 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei16.c
@@ -6,772 +6,772 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m4x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf8x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf4x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m1x2_tu(vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m2x2_tu(vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m4x2_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m1x2_tu(vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m2x2_tu(vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m4x2_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m1x2_tu(vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m2x2_tu(vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m4x2_tu(vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m1x2_tu(vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m2x2_tu(vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf8x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf4x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f32m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_f64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i8m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i32m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_i64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_i64m4x2_mu(mask, maskedoff_tuple,
base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, 
-vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u32m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_u64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei16\.[ivxfswum.]+\s+} 192 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei32.c
index 4879f2e98..2125fdf71 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei32.c
@@ -6,740 +6,740 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf8x2_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf8x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1,
vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei32_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei32_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } 
-vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei32_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, 
const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_i64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t 
test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, 
vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei32\.[ivxfswum.]+\s+} 184 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei64.c index 1466c58d9..7f52aa19e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei64.c @@ -6,660 +6,660 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl); 
+vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t 
test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, 
size_t vl) { + return __riscv_vloxseg2ei64_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf2x2_tu(vd, 
rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t 
test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t 
test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, 
vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t 
*rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei64_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, 
size_t vl) { + return __riscv_vloxseg2ei64_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf4x2_tumu(vm, vd, rs1, 
rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, 
vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, 
vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei64_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f32m4x2_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, 
vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t 
test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_i64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t 
vl) { - return __riscv_vloxseg2ei64_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t 
test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei64\.[ivxfswum.]+\s+} 164 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei8.c index 6ba4dca7b..605f202b8 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg2ei8.c @@ -6,772 +6,772 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t 
maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8_v_i8m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m4x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, 
const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t 
test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m4x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const 
uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, 
vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf8x2_tum(vm, 
vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const 
int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t 
test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t 
bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, 
vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei8_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, 
vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, 
const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t 
mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t 
test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_i64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } 
-vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u8m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u32m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_v_u64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei8\.[ivxfswum.]+\s+} 192 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei16.c
index ab6a12598..671ba6dd2 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei16.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m2x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m1x3_tu(vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m2x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m1x3_tu(vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m2x3_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m1x3_tu(vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m2x3_tu(vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m1x3_tu(vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei16\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei32.c
index 61bb71472..106a3ad56 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei32.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl)
{ + return __riscv_vloxseg3ei32_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + 
return __riscv_vloxseg3ei32_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t 
test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const 
uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t 
test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t 
test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei32_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t 
test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t 
*rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, 
vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei32_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t 
test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t 
maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t 
*base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei32\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei64.c
index 20a228fa2..a6003ac02 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei64.c
@@ -6,564 +6,564 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m2x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8m1x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m1x3_tu(vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m2x3_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m1x3_tu(vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m2x3_tu(vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m1x3_tu(vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u64m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u64m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask,
vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei64_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei64\.[ivxfswum.]+\s+} 140 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei8.c index 9169367fd..42401b525 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg3ei8.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const 
float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m2x3_tu(vd, rs1, rs2, vl); } 
-vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t 
bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32mf2x3_tu(maskedoff_tuple, base, 
bindex, vl); +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t 
mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei8_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const 
int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t 
test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t 
*base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, 
vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t 
test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + 
return __riscv_vloxseg3ei8_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t 
test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei8\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei16.c
index 85cb83376..6f12b2336 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei16.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f64m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf8x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf4x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8m1x4_tu(vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8m2x4_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16m1x4_tu(vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16m2x4_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i32m1x4_tu(vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i32m2x4_tu(vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i64m1x4_tu(vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i64m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8mf8x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8mf4x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u64m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u64m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const 
uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei16_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei16_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei16_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t 
test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t 
*rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) 
{ + return __riscv_vloxseg4ei16_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
 }
-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_f64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i8m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_i64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u8m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei16\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei32.c
index c6f2d034b..29e25b3ee 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei32.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f64m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf8x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf4x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8m1x4_tu(vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8m2x4_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16m1x4_tu(vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16m2x4_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32m1x4_tu(vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32m2x4_tu(vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i64m1x4_tu(vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i64m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf8x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf4x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg4ei32_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t 
vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t 
test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, 
size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei32\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei64.c
index ee3dd135c..c75d4f848 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei64.c
@@ -6,564 +6,564 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return
__riscv_vloxseg4ei64_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei64_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x4_t 
test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, 
const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t 
vl) { - return __riscv_vloxseg4ei64_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf2x4_tum(mask, 
maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const 
int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf2x4_tum(vm, vd, 
rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t 
test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } 
-vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const 
int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei64_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return 
+vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_f64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i8m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_i64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u8m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei64\.[ivxfswum.]+\s+} 140 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei8.c
index 474a13bdb..b0db11966 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg4ei8.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f64m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf8x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i64m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i64m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf8x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u32mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u32m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u32m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u64m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u64m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, 
vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei8_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + 
return __riscv_vloxseg4ei8_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t 
bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t 
test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei8\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei16.c
index dfa539dda..7b4fbd6f9 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei16.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_f16mf4x5_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_f16mf2x5_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_f16m1x5_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_f32mf2x5_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_f32m1x5_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_f64m1x5_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_i8mf8x5_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x5_t
test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t 
*base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t 
bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei16_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t 
test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const 
uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, 
vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vloxseg5ei16_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf2x5_tumu(vm, vd, rs1, 
rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } 
-vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t 
bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_i64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t 
test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei16_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei16\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei32.c index 400808638..b9ada256c 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t 
test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t 
test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t 
*rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei32_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t 
*rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg5ei32_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vloxseg5ei32_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } 
-vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t 
test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t 
mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_i64m1x5_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, 
vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei64.c index 9929674c2..bc26c6b61 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei64.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f32m1x5_tu(vd, rs1, rs2, vl); } 
-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, 
const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t 
vl) { - return __riscv_vloxseg5ei64_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei64_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei64_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t 
+  return __riscv_vloxseg5ei64_v_f64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f16m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_f64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i8m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i16m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_i64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_i64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u8m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u16m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_v_u64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei8.c
index 827fe916a..269a1aba0 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg5ei8.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf4x5_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf2x5_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16m1x5_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32mf2x5_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32m1x5_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f64m1x5_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf8x5_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf4x5_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf2x5_tu(vd, rs1, rs2, vl);
 }
-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8m1x5_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf4x5_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf2x5_tu(vd, rs1, rs2, vl);
 }
-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16m1x5_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32mf2x5_tu(vd, rs1, rs2, vl);
 }
-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32m1x5_tu(vd, rs1, rs2, vl);
 }
-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i64m1x5_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf8x5_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf4x5_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf2x5_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8m1x5_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf4x5_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf2x5_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16m1x5_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32mf2x5_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32m1x5_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u64m1x5_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_u64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei8_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); 
} -vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_i64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_v_u16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - 
return __riscv_vloxseg5ei8_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg5ei8_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_u32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg5ei8_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_u64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei8\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei16.c
index 9e3799079..d4d566c88 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei16.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_f16mf4x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_f16mf2x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_f16m1x6_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_f32mf2x6_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t
test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t 
*rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + 
return __riscv_vloxseg6ei16_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, 
vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
__riscv_vloxseg6ei16_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); 
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t 
vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { 
+ return __riscv_vloxseg6ei16_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { 
+ return __riscv_vloxseg6ei16_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t 
test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei16_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t 
bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u8m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vloxseg6ei16_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei16_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei32.c
index cae9d3f53..c257db0d0 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_f16mf4x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_f16mf2x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_f16m1x6_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_f32mf2x6_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_f32m1x6_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_f64m1x6_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_i8mf8x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_i8mf8x6_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxseg6ei32_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_i8mf4x6_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t 
maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t 
maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, 
vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf8x6_tum(mask, maskedoff_tuple, base, 
bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t 
vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t 
test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, 
vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei32_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei32_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t 
maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_v_u16mf2x6_mu(mask, 
maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei64.c
index f682c3f40..675b74a3b 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei64.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_v_f16mf4x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_v_f16mf2x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t
test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, 
const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei64_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vloxseg6ei64_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t 
test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { 
+ return __riscv_vloxseg6ei64_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei64_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); 
} -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, 
vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const 
float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vloxseg6ei64_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t 
vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_v_u32m1x6_mu(vm, vd, 
rs1, rs2, vl);
 }

-vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei8.c
index 929a369a3..043d0104a 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg6ei8.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f16mf4x6_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f16mf2x6_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f16m1x6_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f32mf2x6_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f32m1x6_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_f64m1x6_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_i8mf8x6_tu(maskedoff_tuple,
base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) 
{ + return __riscv_vloxseg6ei8_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u32m1x6_tu(vd, rs1, rs2, vl); } 
-vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t 
*base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t 
test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { 
+ return __riscv_vloxseg6ei8_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei8_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, 
vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg6ei8_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f32m1x6_mu(mask, maskedoff_tuple, 
base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei8_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg6ei8_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg6ei8_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t 
*base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei8\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei16.c
index 555f16454..67ea96ac7 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei16.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return
__riscv_vloxseg7ei16_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei16_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16mf4x7_tu(maskedoff_tuple, base, 
bindex, vl); +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t 
test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const 
int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei16_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei16_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei16_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf4x7_tumu(mask, maskedoff_tuple, 
base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei16_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_f64m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i8m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_i64m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u8m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_u64m1x7_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei32.c
index 38a0366dd..a618555de 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei32.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i8m1x7_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i16m1x7_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_v_i32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t 
test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + 
return __riscv_vloxseg7ei32_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t 
test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const 
uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const 
float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei32_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, 
size_t vl) { + return __riscv_vloxseg7ei32_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t 
test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei64.c index d80e4b222..0a27702b5 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei64.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double 
float64_t; -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_f16mf4x7_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_f16mf2x7_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i8mf2x7_tu(vd, rs1, rs2, vl); 
} -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t 
maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t 
test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_f64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i8m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_i64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u8m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei64_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei64_v_u64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei8.c
index 379921bcd..6b70591b8 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg7ei8.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf2x7_tu(vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8m1x7_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf4x7_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf2x7_tu(vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16m1x7_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32mf2x7_tu(vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32m1x7_tu(vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i64m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf8x7_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf4x7_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf2x7_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16mf4x7_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16mf2x7_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u32mf2x7_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u32m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u64m1x7_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_f64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i8m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_i64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei8_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei8_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t vm,
vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei16.c index 63f774887..c2bfa6705 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei16.c +++ 
b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl); 
+vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd, 
const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t 
test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t 
test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, 
size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32m1x8_tum(vm, vd, 
rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei16_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t 
test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t 
mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, 
vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32mf2x8_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, 
const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei16\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei32.c index 8790d8f9b..8d85ee108 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei32_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf2x8_tu(vd, rs1, 
rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t 
test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t 
test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t 
rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t 
test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t 
test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, 
vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, 
vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t 
vl) { - return __riscv_vloxseg8ei32_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8mf2x8_mu(mask, maskedoff_tuple, 
base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t 
vl) { + return __riscv_vloxseg8ei32_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t 
test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei64.c index aea8ee071..4503aa908 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei64.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8mf8x8_tu(maskedoff_tuple, 
base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const 
int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, 
size_t vl) { + return __riscv_vloxseg8ei64_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); 
} -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei64_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei64_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, 
vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i64m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u64m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f16m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f32m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_f64m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i8m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i16m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i32m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_i64m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u8m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u16m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u32m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_v_u64m1x8_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei8.c
index 36c9794f2..8d432867f 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vloxseg8ei8.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf4x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf2x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16m1x8_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32mf2x8_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32m1x8_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f64m1x8_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf8x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf8x8_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf4x8_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf2x8_tu(vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8m1x8_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf4x8_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf2x8_tu(vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16m1x8_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32mf2x8_tu(vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32m1x8_tu(vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i64m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf8x8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf4x8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf2x8_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf4x8_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf2x8_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32mf2x8_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u64m1x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f64m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i64m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u64m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f16m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f32m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_f64m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i8m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i16m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i32m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_i64m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u8m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u16m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1,
vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlse16.c b/auto-generated/policy_funcs/gnu-api-tests/vlse16.c index b48e55424..7780da68e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlse16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlse16.c @@ -6,292 +6,292 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16mf4_tu(maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16mf2_tu(maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m1_tu(maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m2_tu(maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m4_tu(maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m8_tu(maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m8_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16mf4_tu(maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, 
size_t vl) { - return __riscv_vlse16_v_i16mf2_tu(maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m1_tu(maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m2_tu(maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m4_tu(maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m8_tu(maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m8_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf4_tu(maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf2_tu(maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m1_tu(maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m2_tu(maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m4_tu(maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m8_tu(maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlse16_v_u16m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16mf4_tum(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16mf2_tum(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m1_tum(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m2_tum(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m4_tum(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m8_tum(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16mf4_tum(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16mf2_tum(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m1_tum(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t 
test_vlse16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m2_tum(mask, maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m4_tum(mask, maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m8_tum(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf4_tum(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf2_tum(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m1_tum(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m2_tum(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m4_tum(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m8_tum(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlse16_v_f16mf4_tumu(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16mf2_tumu(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m1_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m2_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m4_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m8_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16mf4_tumu(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16mf2_tumu(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m1_tumu(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m2_tumu(mask, maskedoff, base, bstride, vl); 
+vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m4_tumu(mask, maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m8_tumu(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf4_tumu(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf2_tumu(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m1_tumu(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m2_tumu(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m4_tumu(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m8_tumu(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16mf4_mu(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t 
rs2, size_t vl) { + return __riscv_vlse16_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16mf2_mu(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m1_mu(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m2_mu(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m4_mu(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_f16m8_mu(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16mf4_mu(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16mf2_mu(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m1_mu(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m2_mu(mask, maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t mask, vint16m4_t 
maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m4_mu(mask, maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_i16m8_mu(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf4_mu(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16mf2_mu(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m1_mu(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m2_mu(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m4_mu(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_v_u16m8_mu(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_u16m8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse16\.[ivxfswum.]+\s+} 72 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlse32.c b/auto-generated/policy_funcs/gnu-api-tests/vlse32.c index b146817c2..92bd88a92 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlse32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlse32.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlse32_v_f32mf2_tu(maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m1_tu(maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m2_tu(maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m4_tu(maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m8_tu(maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32mf2_tu(maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m1_tu(maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m2_tu(maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m4_tu(maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m8_tu(maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32mf2_tu(maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, 
size_t vl) { + return __riscv_vlse32_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m1_tu(maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m2_tu(maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m4_tu(maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m8_tu(maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32mf2_tum(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m1_tum(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m2_tum(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m4_tum(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m8_tum(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32mf2_tum(mask, maskedoff, base, bstride, vl); 
+vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m1_tum(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m2_tum(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m4_tum(mask, maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m8_tum(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32mf2_tum(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m1_tum(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m2_tum(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m4_tum(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m8_tum(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m8_tum(vm, 
vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32mf2_tumu(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m1_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m2_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m4_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m8_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32mf2_tumu(mask, maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m1_tumu(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m2_tumu(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m4_tumu(mask, maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t mask, 
vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m8_tumu(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32mf2_tumu(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m1_tumu(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m2_tumu(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m4_tumu(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m8_tumu(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32mf2_mu(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m1_mu(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m2_mu(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlse32_v_f32m4_mu(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_f32m8_mu(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32mf2_mu(mask, maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m1_mu(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m2_mu(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m4_mu(mask, maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_i32m8_mu(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32mf2_mu(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m1_mu(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m2_mu(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + 
return __riscv_vlse32_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m4_mu(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_v_u32m8_mu(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_v_u32m8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse32\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlse64.c b/auto-generated/policy_funcs/gnu-api-tests/vlse64.c index 610765528..1e7e5e067 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlse64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlse64.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m1_tu(maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m2_tu(maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m4_tu(maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m8_tu(maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m1_tu(maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m2_tu(maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m4_tu(maskedoff, base, bstride, vl); +vint64m4_t 
test_vlse64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m8_tu(maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m1_tu(maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m2_tu(maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m4_tu(maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m8_tu(maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m1_tum(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m2_tum(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m4_tum(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m8_tum(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m1_tum(mask, maskedoff, base, bstride, 
vl); +vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m2_tum(mask, maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m4_tum(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m8_tum(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m1_tum(mask, maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m2_tum(mask, maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m4_tum(mask, maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m8_tum(mask, maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m1_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m2_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlse64_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m4_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m8_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m1_tumu(mask, maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m2_tumu(mask, maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m4_tumu(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m8_tumu(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m1_tumu(mask, maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m2_tumu(mask, maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m4_tumu(mask, maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t mask, 
vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m8_tumu(mask, maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m1_mu(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m2_mu(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m4_mu(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_f64m8_mu(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m1_mu(mask, maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m2_mu(mask, maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m4_mu(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_i64m8_mu(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_v_u64m1_mu(mask, maskedoff, base, bstride, vl); +vuint64m1_t 
test_vlse64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m1_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m2_mu(mask, maskedoff, base, bstride, vl);
+vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m4_mu(mask, maskedoff, base, bstride, vl);
+vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse64_v_u64m8_mu(mask, maskedoff, base, bstride, vl);
+vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse64_v_u64m8_mu(vm, vd, rs1, rs2, vl);
 }
 
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse64\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlse8.c b/auto-generated/policy_funcs/gnu-api-tests/vlse8.c
index 4185cfb1a..a7bb6e2db 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlse8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlse8.c
@@ -6,228 +6,228 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf8_tu(maskedoff, base, bstride, vl);
+vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf4_tu(maskedoff, base, bstride, vl);
+vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8mf2_tu(maskedoff, base, bstride, vl);
+vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8mf2_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m1_tu(maskedoff, base, bstride, vl);
+vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m1_tu(vd, rs1, rs2, vl);
 }
 
-vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m2_tu(maskedoff, base, bstride, vl);
+vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_i8m2_tu(vd, rs1, rs2, vl);
 }
 
-vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_i8m4_tu(maskedoff,
base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m4_tu(vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m8_tu(maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf8_tu(maskedoff, base, bstride, vl); +vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf4_tu(maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf2_tu(maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m1_tu(maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m2_tu(maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m4_tu(maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m8_tu(maskedoff, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf8_tum(mask, maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf4_tum(mask, maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t mask, 
vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf2_tum(mask, maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m1_tum(mask, maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m2_tum(mask, maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m4_tum(mask, maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m8_tum(mask, maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf8_tum(mask, maskedoff, base, bstride, vl); +vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf4_tum(mask, maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf2_tum(mask, maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m1_tum(mask, maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m2_tum(mask, maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlse8_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m4_tum(mask, maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m8_tum(mask, maskedoff, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf8_tumu(mask, maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf4_tumu(mask, maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf2_tumu(mask, maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m1_tumu(mask, maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m2_tumu(mask, maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m4_tumu(mask, maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m8_tumu(mask, maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf8_tumu(mask, maskedoff, base, bstride, vl); 
+vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf4_tumu(mask, maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf2_tumu(mask, maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m1_tumu(mask, maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m2_tumu(mask, maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m4_tumu(mask, maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m8_tumu(mask, maskedoff, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf8_mu(mask, maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf4_mu(mask, maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8mf2_mu(mask, maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t mask, vint8m1_t 
maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m1_mu(mask, maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m2_mu(mask, maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m4_mu(mask, maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_i8m8_mu(mask, maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_i8m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf8_mu(mask, maskedoff, base, bstride, vl); +vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf4_mu(mask, maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8mf2_mu(mask, maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m1_mu(mask, maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m2_mu(mask, maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_v_u8m4_mu(mask, maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_v_u8m4_mu(vm, vd, rs1, rs2, vl); } 
-vuint8m8_t test_vlse8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_v_u8m8_mu(mask, maskedoff, base, bstride, vl);
+vuint8m8_t test_vlse8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_v_u8m8_mu(vm, vd, rs1, rs2, vl);
 }
 
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse8\.[ivxfswum.]+\s+} 56 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16.c
index 448b93edb..a6ed6e69a 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16mf4x2_tu(maskedoff_tuple, base, vl);
+vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16mf4x2_tu(vd, rs1, vl);
 }
 
-vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16mf2x2_tu(maskedoff_tuple, base, vl);
+vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16mf2x2_tu(vd, rs1, vl);
 }
 
-vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m1x2_tu(maskedoff_tuple, base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m1x2_tu(vd, rs1, vl);
 }
 
-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m2x2_tu(maskedoff_tuple, base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m2x2_tu(vd, rs1, vl);
 }
 
-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_f16m4x2_tu(maskedoff_tuple, base, vl);
+vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_f16m4x2_tu(vd, rs1, vl);
 }
 
-vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16mf4x2_tu(maskedoff_tuple, base, vl);
+vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16mf4x2_tu(vd, rs1, vl);
 }
 
-vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16mf2x2_tu(maskedoff_tuple, base, vl);
+vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16mf2x2_tu(vd, rs1, vl);
 }
 
-vint16m1x2_t test_vlseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_i16m1x2_tu(maskedoff_tuple, base, vl);
+vint16m1x2_t test_vlseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_i16m1x2_tu(vd, rs1, vl);
 }
-vint16m2x2_t test_vlseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m2x2_tu(maskedoff_tuple, base, vl); +vint16m2x2_t test_vlseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m2x2_tu(vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m4x2_tu(maskedoff_tuple, base, vl); +vint16m4x2_t test_vlseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m4x2_tu(vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf4x2_tu(maskedoff_tuple, base, vl); +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf4x2_tu(vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf2x2_tu(maskedoff_tuple, base, vl); +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf2x2_tu(vd, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m1x2_tu(maskedoff_tuple, base, vl); +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m1x2_tu(vd, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m2x2_tu(maskedoff_tuple, base, vl); +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m2x2_tu(vd, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m4x2_tu(maskedoff_tuple, base, vl); +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m4x2_tu(vd, rs1, vl); } -vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16mf4x2_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16mf4x2_tum(vm, vd, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16mf2x2_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16mf2x2_tum(vm, vd, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m1x2_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m1x2_tum(vm, vd, rs1, vl); } -vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const 
float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m2x2_tum(mask, maskedoff_tuple, base, vl); +vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m2x2_tum(vm, vd, rs1, vl); } -vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m4x2_tum(mask, maskedoff_tuple, base, vl); +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m4x2_tum(vm, vd, rs1, vl); } -vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf4x2_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf4x2_tum(vm, vd, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf2x2_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf2x2_tum(vm, vd, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m1x2_tum(mask, maskedoff_tuple, base, vl); +vint16m1x2_t test_vlseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m1x2_tum(vm, vd, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m2x2_tum(mask, maskedoff_tuple, base, vl); +vint16m2x2_t test_vlseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m2x2_tum(vm, vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m4x2_tum(mask, maskedoff_tuple, base, vl); +vint16m4x2_t test_vlseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m4x2_tum(vm, vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf4x2_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf4x2_tum(vm, vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf2x2_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf2x2_tum(vm, vd, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m1x2_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, 
size_t vl) { + return __riscv_vlseg2e16_v_u16m1x2_tum(vm, vd, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m2x2_tum(mask, maskedoff_tuple, base, vl); +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m2x2_tum(vm, vd, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m4x2_tum(mask, maskedoff_tuple, base, vl); +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m4x2_tum(vm, vd, rs1, vl); } -vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16mf4x2_tumu(vm, vd, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16mf2x2_tumu(vm, vd, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m1x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m1x2_tumu(vm, vd, rs1, vl); } -vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m2x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m2x2_tumu(vm, vd, rs1, vl); } -vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m4x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m4x2_tumu(vm, vd, rs1, vl); } -vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf4x2_tumu(vm, vd, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf2x2_tumu(vm, vd, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_tumu(vbool16_t mask, 
vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m1x2_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x2_t test_vlseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m1x2_tumu(vm, vd, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m2x2_tumu(mask, maskedoff_tuple, base, vl); +vint16m2x2_t test_vlseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m2x2_tumu(vm, vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m4x2_tumu(mask, maskedoff_tuple, base, vl); +vint16m4x2_t test_vlseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m4x2_tumu(vm, vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf4x2_tumu(vm, vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf2x2_tumu(vm, vd, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m1x2_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m1x2_tumu(vm, vd, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m2x2_tumu(mask, maskedoff_tuple, base, vl); +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m2x2_tumu(vm, vd, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16m4x2_tumu(mask, maskedoff_tuple, base, vl); +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16m4x2_tumu(vm, vd, rs1, vl); } -vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16mf4x2_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16mf4x2_mu(vm, vd, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16mf2x2_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x2_t 
test_vlseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16mf2x2_mu(vm, vd, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m1x2_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m1x2_mu(vm, vd, rs1, vl); } -vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m2x2_mu(mask, maskedoff_tuple, base, vl); +vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m2x2_mu(vm, vd, rs1, vl); } -vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_f16m4x2_mu(mask, maskedoff_tuple, base, vl); +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_f16m4x2_mu(vm, vd, rs1, vl); } -vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf4x2_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf4x2_mu(vm, vd, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16mf2x2_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16mf2x2_mu(vm, vd, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m1x2_mu(mask, maskedoff_tuple, base, vl); +vint16m1x2_t test_vlseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m1x2_mu(vm, vd, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m2x2_mu(mask, maskedoff_tuple, base, vl); +vint16m2x2_t test_vlseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m2x2_mu(vm, vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_i16m4x2_mu(mask, maskedoff_tuple, base, vl); +vint16m4x2_t test_vlseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_i16m4x2_mu(vm, vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg2e16_v_u16mf4x2_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg2e16_v_u16mf4x2_mu(vm, vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t 
maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16mf2x2_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16mf2x2_mu(vm, vd, rs1, vl);
 }
 
-vuint16m1x2_t test_vlseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16m1x2_mu(mask, maskedoff_tuple, base, vl);
+vuint16m1x2_t test_vlseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16m1x2_mu(vm, vd, rs1, vl);
 }
 
-vuint16m2x2_t test_vlseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16m2x2_mu(mask, maskedoff_tuple, base, vl);
+vuint16m2x2_t test_vlseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16m2x2_mu(vm, vd, rs1, vl);
 }
 
-vuint16m4x2_t test_vlseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_v_u16m4x2_mu(mask, maskedoff_tuple, base, vl);
+vuint16m4x2_t test_vlseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_u16m4x2_mu(vm, vd, rs1, vl);
 }
 
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16ff.c
index 1f8e410fc..088aa0cb7 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e16ff.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_v_f16mf4x2_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_f16mf4x2_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_v_f16mf2x2_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_f16mf2x2_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_v_f16m1x2_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_f16m1x2_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_v_f16m2x2_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_f16m2x2_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat16m4x2_t
test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m4x2_tu(vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf4x2_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf4x2_tu(vd, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf2x2_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf2x2_tu(vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m1x2_tu(vd, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m2x2_tu(vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m4x2_tu(vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf4x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf4x2_tu(vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf2x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf2x2_tu(vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m1x2_tu(vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t 
*new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m2x2_tu(vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m4x2_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m1x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x2_t 
test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg2e16ff_v_u16m4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m1x2_tumu(vm, vd, rs1, new_vl, 
vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t 
maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m1x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_f16m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_f16m4x2_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_i16m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); 
+vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_i16m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e16ff_v_u16m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_u16m4x2_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16ff\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32.c index 23c378c7d..2fdd6b9f0 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32mf2x2_tu(maskedoff_tuple, base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32mf2x2_tu(vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m1x2_tu(maskedoff_tuple, base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m1x2_tu(vd, rs1, vl); } -vfloat32m2x2_t 
test_vlseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m2x2_tu(maskedoff_tuple, base, vl); +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m2x2_tu(vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m4x2_tu(maskedoff_tuple, base, vl); +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m4x2_tu(vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32mf2x2_tu(maskedoff_tuple, base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32mf2x2_tu(vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m1x2_tu(maskedoff_tuple, base, vl); +vint32m1x2_t test_vlseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m1x2_tu(vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m2x2_tu(maskedoff_tuple, base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m2x2_tu(vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m4x2_tu(maskedoff_tuple, base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m4x2_tu(vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32mf2x2_tu(maskedoff_tuple, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32mf2x2_tu(vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m1x2_tu(maskedoff_tuple, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m1x2_tu(vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m2x2_tu(maskedoff_tuple, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m2x2_tu(vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m4x2_tu(maskedoff_tuple, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m4x2_tu(vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32mf2x2_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t 
vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32mf2x2_tum(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m1x2_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m1x2_tum(vm, vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m2x2_tum(mask, maskedoff_tuple, base, vl); +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m2x2_tum(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m4x2_tum(mask, maskedoff_tuple, base, vl); +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m4x2_tum(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32mf2x2_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32mf2x2_tum(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m1x2_tum(mask, maskedoff_tuple, base, vl); +vint32m1x2_t test_vlseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m1x2_tum(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m2x2_tum(mask, maskedoff_tuple, base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m2x2_tum(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m4x2_tum(mask, maskedoff_tuple, base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m4x2_tum(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32mf2x2_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32mf2x2_tum(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m1x2_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m1x2_tum(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const 
uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m2x2_tum(mask, maskedoff_tuple, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m2x2_tum(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m4x2_tum(mask, maskedoff_tuple, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m4x2_tum(vm, vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32mf2x2_tumu(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m1x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m1x2_tumu(vm, vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m2x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m2x2_tumu(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m4x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m4x2_tumu(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32mf2x2_tumu(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m1x2_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x2_t test_vlseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m1x2_tumu(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m2x2_tumu(mask, maskedoff_tuple, base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m2x2_tumu(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m4x2_tumu(mask, maskedoff_tuple, base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2_tumu(vbool8_t vm, 
vint32m4x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m4x2_tumu(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32mf2x2_tumu(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m1x2_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m1x2_tumu(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m2x2_tumu(mask, maskedoff_tuple, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m2x2_tumu(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m4x2_tumu(mask, maskedoff_tuple, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m4x2_tumu(vm, vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32mf2x2_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32mf2x2_mu(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m1x2_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m1x2_mu(vm, vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m2x2_mu(mask, maskedoff_tuple, base, vl); +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m2x2_mu(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_f32m4x2_mu(mask, maskedoff_tuple, base, vl); +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_f32m4x2_mu(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32mf2x2_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32mf2x2_mu(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_mu(vbool32_t mask, 
vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m1x2_mu(mask, maskedoff_tuple, base, vl); +vint32m1x2_t test_vlseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m1x2_mu(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m2x2_mu(mask, maskedoff_tuple, base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m2x2_mu(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_i32m4x2_mu(mask, maskedoff_tuple, base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_i32m4x2_mu(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32mf2x2_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32mf2x2_mu(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m1x2_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m1x2_mu(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m2x2_mu(mask, maskedoff_tuple, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m2x2_mu(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_v_u32m4x2_mu(mask, maskedoff_tuple, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_v_u32m4x2_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32ff.c index 135becf2f..39e74099a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e32ff.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32mf2x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32mf2x2_tu(vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t 
*new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m1x2_tu(vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m2x2_tu(vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m4x2_tu(vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32mf2x2_tu(maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32mf2x2_tu(vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m1x2_tu(vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m2x2_tu(vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m4x2_tu(vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32mf2x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32mf2x2_tu(vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m1x2_tu(vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m2x2_tu(maskedoff_tuple, base, 
new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m2x2_tu(vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m4x2_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m1x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg2e32ff_v_i32m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t 
test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, 
size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m1x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_f32m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_f32m4x2_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_i32m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t 
test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_i32m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_v_u32m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_v_u32m4x2_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32ff\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64.c index 33e988a0b..464fe762e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m1x2_tu(maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m1x2_tu(vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m2x2_tu(maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m2x2_tu(vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m4x2_tu(maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m4x2_tu(vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return 
__riscv_vlseg2e64_v_i64m1x2_tu(maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m1x2_tu(vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m2x2_tu(maskedoff_tuple, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m2x2_tu(vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m4x2_tu(maskedoff_tuple, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m4x2_tu(vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m1x2_tu(maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m1x2_tu(vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m2x2_tu(maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m2x2_tu(vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m4x2_tu(maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m4x2_tu(vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m1x2_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m1x2_tum(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m2x2_tum(mask, maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m2x2_tum(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m4x2_tum(mask, maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m4x2_tum(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m1x2_tum(mask, maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m1x2_tum(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m2x2_tum(mask, maskedoff_tuple, base, vl); +vint64m2x2_t 
test_vlseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m2x2_tum(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m4x2_tum(mask, maskedoff_tuple, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m4x2_tum(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m1x2_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m1x2_tum(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m2x2_tum(mask, maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m2x2_tum(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m4x2_tum(mask, maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m4x2_tum(vm, vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m1x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m1x2_tumu(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m2x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m2x2_tumu(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m4x2_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m4x2_tumu(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m1x2_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m1x2_tumu(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m2x2_tumu(mask, maskedoff_tuple, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m2x2_tumu(vm, vd, rs1, vl); } -vint64m4x2_t 
test_vlseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m4x2_tumu(mask, maskedoff_tuple, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m4x2_tumu(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m1x2_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m1x2_tumu(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m2x2_tumu(mask, maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m2x2_tumu(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m4x2_tumu(mask, maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m4x2_tumu(vm, vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m1x2_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m1x2_mu(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m2x2_mu(mask, maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m2x2_mu(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_f64m4x2_mu(mask, maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_f64m4x2_mu(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m1x2_mu(mask, maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m1x2_mu(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m2x2_mu(mask, maskedoff_tuple, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m2x2_mu(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_i64m4x2_mu(mask, maskedoff_tuple, base, vl); +vint64m4x2_t 
test_vlseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_i64m4x2_mu(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m1x2_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m1x2_mu(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m2x2_mu(mask, maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m2x2_mu(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_v_u64m4x2_mu(mask, maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_v_u64m4x2_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64ff.c index a5f858214..01ba51d1b 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e64ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m1x2_tu(vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m2x2_tu(vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m4x2_tu(vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m1x2_tu(vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m2x2_tu(maskedoff_tuple, base, new_vl, vl); 
+vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m2x2_tu(vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m4x2_tu(vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m1x2_tu(vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m2x2_tu(vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m4x2_tu(vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m1x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t 
*new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); 
+vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m1x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_f64m2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_f64m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, 
size_t vl) { + return __riscv_vlseg2e64ff_v_f64m4x2_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_i64m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_i64m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_v_u64m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_v_u64m4x2_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8.c index 759790b6e..5ae9028bf 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf8x2_tu(maskedoff_tuple, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf8x2_tu(vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t 
vl) { - return __riscv_vlseg2e8_v_i8mf4x2_tu(maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf4x2_tu(vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf2x2_tu(maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf2x2_tu(vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m1x2_tu(maskedoff_tuple, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m1x2_tu(vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m2x2_tu(maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m2x2_tu(vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m4x2_tu(maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m4x2_tu(vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf8x2_tu(maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf8x2_tu(vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf4x2_tu(maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf4x2_tu(vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf2x2_tu(maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf2x2_tu(vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m1x2_tu(maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m1x2_tu(vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m2x2_tu(maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m2x2_tu(vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m4x2_tu(maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m4x2_tu(vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf8x2_tum(mask, maskedoff_tuple, 
base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf8x2_tum(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf4x2_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf4x2_tum(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf2x2_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf2x2_tum(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m1x2_tum(mask, maskedoff_tuple, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m1x2_tum(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m2x2_tum(mask, maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m2x2_tum(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m4x2_tum(mask, maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m4x2_tum(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf8x2_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf8x2_tum(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf4x2_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf4x2_tum(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf2x2_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf2x2_tum(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m1x2_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m1x2_tum(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return 
__riscv_vlseg2e8_v_u8m2x2_tum(mask, maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m2x2_tum(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m4x2_tum(mask, maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m4x2_tum(vm, vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf8x2_tumu(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf4x2_tumu(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf2x2_tumu(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m1x2_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m1x2_tumu(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m2x2_tumu(mask, maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m2x2_tumu(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m4x2_tumu(mask, maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m4x2_tumu(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf8x2_tumu(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf4x2_tumu(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t 
maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf2x2_tumu(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m1x2_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m1x2_tumu(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m2x2_tumu(mask, maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m2x2_tumu(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m4x2_tumu(mask, maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m4x2_tumu(vm, vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf8x2_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf8x2_mu(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf4x2_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf4x2_mu(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8mf2x2_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8mf2x2_mu(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m1x2_mu(mask, maskedoff_tuple, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m1x2_mu(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m2x2_mu(mask, maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m2x2_mu(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_i8m4x2_mu(mask, maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_i8m4x2_mu(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t 
maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf8x2_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf8x2_mu(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf4x2_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf4x2_mu(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8mf2x2_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8mf2x2_mu(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m1x2_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m1x2_mu(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m2x2_mu(mask, maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m2x2_mu(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_v_u8m4x2_mu(mask, maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_v_u8m4x2_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8ff.c index 07ea141c6..12197da90 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg2e8ff.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf8x2_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf8x2_tu(vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf4x2_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf4x2_tu(vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf2x2_tu(maskedoff_tuple, base, new_vl, vl); 
+vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf2x2_tu(vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m1x2_tu(vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m2x2_tu(vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m4x2_tu(vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf8x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf8x2_tu(vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf4x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf4x2_tu(vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf2x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf2x2_tu(vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m1x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m1x2_tu(vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m2x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m2x2_tu(vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m4x2_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m4x2_tu(vd, rs1, new_vl, vl); } -vint8mf8x2_t 
test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf8x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf8x2_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf8x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf8x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t 
test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m1x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m2x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m4x2_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf8x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t mask, 
vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf8x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m1x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m2x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m4x2_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf8x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf8x2_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t 
test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_i8m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_i8m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf8x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf8x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8mf2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m1x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const 
uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m2x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_v_u8m4x2_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_v_u8m4x2_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8ff\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16.c index 3e137a359..e38f11c5f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf4x3_tu(maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf4x3_tu(vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf2x3_tu(maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf2x3_tu(vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m1x3_tu(maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m1x3_tu(vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m2x3_tu(maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m2x3_tu(vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf4x3_tu(maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf4x3_tu(vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf2x3_tu(maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf2x3_tu(vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m1x3_tu(maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return 
__riscv_vlseg3e16_v_i16m1x3_tu(vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m2x3_tu(maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m2x3_tu(vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf4x3_tu(maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf4x3_tu(vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf2x3_tu(maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf2x3_tu(vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m1x3_tu(maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m1x3_tu(vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m2x3_tu(maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m2x3_tu(vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf4x3_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf4x3_tum(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf2x3_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf2x3_tum(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m1x3_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m1x3_tum(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m2x3_tum(mask, maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m2x3_tum(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf4x3_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return 
__riscv_vlseg3e16_v_i16mf4x3_tum(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf2x3_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf2x3_tum(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m1x3_tum(mask, maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m1x3_tum(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m2x3_tum(mask, maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m2x3_tum(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf4x3_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf4x3_tum(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf2x3_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf2x3_tum(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m1x3_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m1x3_tum(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m2x3_tum(mask, maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m2x3_tum(vm, vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf4x3_tumu(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf2x3_tumu(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t 
*base, size_t vl) { - return __riscv_vlseg3e16_v_f16m1x3_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m1x3_tumu(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m2x3_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m2x3_tumu(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf4x3_tumu(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf2x3_tumu(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m1x3_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m1x3_tumu(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m2x3_tumu(mask, maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m2x3_tumu(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf4x3_tumu(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf2x3_tumu(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m1x3_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m1x3_tumu(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m2x3_tumu(mask, maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tumu(vbool8_t vm, 
vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m2x3_tumu(vm, vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf4x3_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf4x3_mu(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16mf2x3_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16mf2x3_mu(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m1x3_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m1x3_mu(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_f16m2x3_mu(mask, maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_f16m2x3_mu(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf4x3_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf4x3_mu(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16mf2x3_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16mf2x3_mu(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m1x3_mu(mask, maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m1x3_mu(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_i16m2x3_mu(mask, maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_i16m2x3_mu(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf4x3_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf4x3_mu(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, 
const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16mf2x3_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16mf2x3_mu(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m1x3_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m1x3_mu(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_v_u16m2x3_mu(mask, maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_v_u16m2x3_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16ff.c index 60e673061..1f76b90bb 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e16ff.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf4x3_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf4x3_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf2x3_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf2x3_tu(vd, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m1x3_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m1x3_tu(vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m2x3_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m2x3_tu(vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf4x3_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf4x3_tu(vd, rs1, new_vl, vl); } -vint16mf2x3_t 
test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf2x3_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf2x3_tu(vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m1x3_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m1x3_tu(vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m2x3_tu(maskedoff_tuple, base, new_vl, vl); +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m2x3_tu(vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf4x3_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf4x3_tu(vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf2x3_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf2x3_tu(vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m1x3_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m1x3_tu(vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m2x3_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m2x3_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf4x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf2x3_tum(vm, vd, rs1, 
new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m1x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m2x3_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf4x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m1x3_tum(vm, vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf4x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const 
uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m1x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m2x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg3e16ff_v_i16m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf4x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl); 
+vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m1x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_f16m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_f16m2x3_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf4x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m1x3_mu(vm, vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_i16m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_i16m2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf4x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg3e16ff_v_u16m1x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_v_u16m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_u16m2x3_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16ff\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32.c index b8148e9d9..c2d97d8a2 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32mf2x3_tu(maskedoff_tuple, base, vl); +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32mf2x3_tu(vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m1x3_tu(maskedoff_tuple, base, vl); +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m1x3_tu(vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m2x3_tu(maskedoff_tuple, base, vl); +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m2x3_tu(vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32mf2x3_tu(maskedoff_tuple, base, vl); +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32mf2x3_tu(vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m1x3_tu(maskedoff_tuple, base, vl); +vint32m1x3_t test_vlseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m1x3_tu(vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m2x3_tu(maskedoff_tuple, base, vl); +vint32m2x3_t test_vlseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m2x3_tu(vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32mf2x3_tu(maskedoff_tuple, base, vl); +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32mf2x3_tu(vd, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m1x3_tu(maskedoff_tuple, base, vl); +vuint32m1x3_t 
test_vlseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m1x3_tu(vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m2x3_tu(maskedoff_tuple, base, vl); +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m2x3_tu(vd, rs1, vl); } -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32mf2x3_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32mf2x3_tum(vm, vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m1x3_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m1x3_tum(vm, vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m2x3_tum(mask, maskedoff_tuple, base, vl); +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m2x3_tum(vm, vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32mf2x3_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32mf2x3_tum(vm, vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m1x3_tum(mask, maskedoff_tuple, base, vl); +vint32m1x3_t test_vlseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m1x3_tum(vm, vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m2x3_tum(mask, maskedoff_tuple, base, vl); +vint32m2x3_t test_vlseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m2x3_tum(vm, vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32mf2x3_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32mf2x3_tum(vm, vd, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m1x3_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m1x3_tum(vm, vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, 
const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m2x3_tum(mask, maskedoff_tuple, base, vl); +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m2x3_tum(vm, vd, rs1, vl); } -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32mf2x3_tumu(vm, vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m1x3_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m1x3_tumu(vm, vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m2x3_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m2x3_tumu(vm, vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32mf2x3_tumu(vm, vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m1x3_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x3_t test_vlseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m1x3_tumu(vm, vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m2x3_tumu(mask, maskedoff_tuple, base, vl); +vint32m2x3_t test_vlseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m2x3_tumu(vm, vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32mf2x3_tumu(vm, vd, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m1x3_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m1x3_tumu(vm, vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m2x3_tumu(mask, maskedoff_tuple, base, vl); +vuint32m2x3_t 
test_vlseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m2x3_tumu(vm, vd, rs1, vl); } -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32mf2x3_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32mf2x3_mu(vm, vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m1x3_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m1x3_mu(vm, vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_f32m2x3_mu(mask, maskedoff_tuple, base, vl); +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_f32m2x3_mu(vm, vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32mf2x3_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32mf2x3_mu(vm, vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m1x3_mu(mask, maskedoff_tuple, base, vl); +vint32m1x3_t test_vlseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m1x3_mu(vm, vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_i32m2x3_mu(mask, maskedoff_tuple, base, vl); +vint32m2x3_t test_vlseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_i32m2x3_mu(vm, vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32mf2x3_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32mf2x3_mu(vm, vd, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m1x3_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m1x3_mu(vm, vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg3e32_v_u32m2x3_mu(mask, maskedoff_tuple, base, vl); +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg3e32_v_u32m2x3_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32ff.c
index 3acda9764..54ab250dd 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e32ff.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32mf2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32mf2x3_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m1x3_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m2x3_tu(vd, rs1, new_vl, vl);
 }

-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32mf2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32mf2x3_tu(vd, rs1, new_vl, vl);
 }

-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m1x3_tu(vd, rs1, new_vl, vl);
 }

-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m2x3_tu(vd, rs1, new_vl, vl);
 }

-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32mf2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32mf2x3_tu(vd, rs1, new_vl, vl);
 }

-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m1x3_tu(vd, rs1, new_vl, vl);
 }

-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m2x3_tu(vd, rs1, new_vl, vl);
 }

-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32mf2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32mf2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32mf2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32mf2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m1x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32mf2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m1x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32mf2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m1x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32mf2x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m1x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_f32m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_f32m2x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32mf2x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m1x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_i32m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_i32m2x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32mf2x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m1x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_v_u32m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_v_u32m2x3_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32ff\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64.c
index 4e9099e2c..50a3fc719 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m1x3_tu(maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m1x3_tu(vd, rs1, vl);
 }

-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m2x3_tu(maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m2x3_tu(vd, rs1, vl);
 }

-vint64m1x3_t test_vlseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m1x3_tu(maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m1x3_tu(vd, rs1, vl);
 }

-vint64m2x3_t test_vlseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m2x3_tu(maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m2x3_tu(vd, rs1, vl);
 }

-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m1x3_tu(maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m1x3_tu(vd, rs1, vl);
 }

-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m2x3_tu(maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m2x3_tu(vd, rs1, vl);
 }

-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m1x3_tum(mask, maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m1x3_tum(vm, vd, rs1, vl);
 }

-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m2x3_tum(mask, maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m2x3_tum(vm, vd, rs1, vl);
 }

-vint64m1x3_t test_vlseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m1x3_tum(mask, maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m1x3_tum(vm, vd, rs1, vl);
 }

-vint64m2x3_t test_vlseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m2x3_tum(mask, maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m2x3_tum(vm, vd, rs1, vl);
 }

-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m1x3_tum(mask, maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m1x3_tum(vm, vd, rs1, vl);
 }

-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m2x3_tum(mask, maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m2x3_tum(vm, vd, rs1, vl);
 }

-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m1x3_tumu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m1x3_tumu(vm, vd, rs1, vl);
 }

-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m2x3_tumu(mask, maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m2x3_tumu(vm, vd, rs1, vl);
 }

-vint64m1x3_t test_vlseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m1x3_tumu(mask, maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m1x3_tumu(vm, vd, rs1, vl);
 }

-vint64m2x3_t test_vlseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m2x3_tumu(mask, maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m2x3_tumu(vm, vd, rs1, vl);
 }

-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m1x3_tumu(mask, maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m1x3_tumu(vm, vd, rs1, vl);
 }

-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m2x3_tumu(mask, maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m2x3_tumu(vm, vd, rs1, vl);
 }

-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m1x3_mu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m1x3_mu(vm, vd, rs1, vl);
 }

-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_f64m2x3_mu(mask, maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_f64m2x3_mu(vm, vd, rs1, vl);
 }

-vint64m1x3_t test_vlseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m1x3_mu(mask, maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m1x3_mu(vm, vd, rs1, vl);
 }

-vint64m2x3_t test_vlseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_i64m2x3_mu(mask, maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_i64m2x3_mu(vm, vd, rs1, vl);
 }

-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m1x3_mu(mask, maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m1x3_mu(vm, vd, rs1, vl);
 }

-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_v_u64m2x3_mu(mask, maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_v_u64m2x3_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64ff.c
index aea622be9..e628a36e2 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e64ff.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m1x3_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m2x3_tu(vd, rs1, new_vl, vl);
 }

-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m1x3_tu(vd, rs1, new_vl, vl);
 }

-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m2x3_tu(vd, rs1, new_vl, vl);
 }

-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m1x3_tu(vd, rs1, new_vl, vl);
 }

-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m2x3_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m1x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m1x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m1x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m1x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_f64m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_f64m2x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m1x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_i64m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_i64m2x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m1x3_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_v_u64m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_v_u64m2x3_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64ff\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8.c
index 46e7720b2..c10b49a52 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8.c
@@ -6,164 +6,164 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf8x3_tu(maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf8x3_tu(vd, rs1, vl);
 }

-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf4x3_tu(maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf4x3_tu(vd, rs1, vl);
 }

-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf2x3_tu(maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf2x3_tu(vd, rs1, vl);
 }

-vint8m1x3_t test_vlseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m1x3_tu(maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m1x3_tu(vd, rs1, vl);
 }

-vint8m2x3_t test_vlseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m2x3_tu(maskedoff_tuple, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m2x3_tu(vd, rs1, vl);
 }

-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf8x3_tu(maskedoff_tuple, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf8x3_tu(vd, rs1, vl);
 }

-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf4x3_tu(maskedoff_tuple, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf4x3_tu(vd, rs1, vl);
 }

-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf2x3_tu(maskedoff_tuple, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf2x3_tu(vd, rs1, vl);
 }

-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m1x3_tu(maskedoff_tuple, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m1x3_tu(vd, rs1, vl);
 }

-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m2x3_tu(maskedoff_tuple, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m2x3_tu(vd, rs1, vl);
 }

-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf8x3_tum(mask, maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf8x3_tum(vm, vd, rs1, vl);
 }

-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf4x3_tum(mask, maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf4x3_tum(vm, vd, rs1, vl);
 }

-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf2x3_tum(mask, maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf2x3_tum(vm, vd, rs1, vl);
 }

-vint8m1x3_t test_vlseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m1x3_tum(mask, maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m1x3_tum(vm, vd, rs1, vl);
 }

-vint8m2x3_t test_vlseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m2x3_tum(mask, maskedoff_tuple, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m2x3_tum(vm, vd, rs1, vl);
 }

-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf8x3_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf8x3_tum(vm, vd, rs1, vl);
 }

-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf4x3_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf4x3_tum(vm, vd, rs1, vl);
 }

-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf2x3_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf2x3_tum(vm, vd, rs1, vl);
 }

-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m1x3_tum(mask, maskedoff_tuple, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m1x3_tum(vm, vd, rs1, vl);
 }

-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m2x3_tum(mask, maskedoff_tuple, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m2x3_tum(vm, vd, rs1, vl);
 }

-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf8x3_tumu(vm, vd, rs1, vl);
 }

-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf4x3_tumu(vm, vd, rs1, vl);
 }

-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf2x3_tumu(vm, vd, rs1, vl);
 }

-vint8m1x3_t test_vlseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m1x3_tumu(mask, maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m1x3_tumu(vm, vd, rs1, vl);
 }

-vint8m2x3_t test_vlseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m2x3_tumu(mask, maskedoff_tuple, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m2x3_tumu(vm, vd, rs1, vl);
 }

-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf8x3_tumu(vm, vd, rs1, vl);
 }

-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf4x3_tumu(vm, vd, rs1, vl);
 }

-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf2x3_tumu(vm, vd, rs1, vl);
 }

-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m1x3_tumu(mask, maskedoff_tuple, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m1x3_tumu(vm, vd, rs1, vl);
 }

-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m2x3_tumu(mask, maskedoff_tuple, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m2x3_tumu(vm, vd, rs1, vl);
 }

-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf8x3_mu(mask, maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf8x3_mu(vm, vd, rs1, vl);
 }

-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf4x3_mu(mask, maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf4x3_mu(vm, vd, rs1, vl);
 }

-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8mf2x3_mu(mask, maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8mf2x3_mu(vm, vd, rs1, vl);
 }

-vint8m1x3_t test_vlseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m1x3_mu(mask, maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m1x3_mu(vm, vd, rs1, vl);
 }

-vint8m2x3_t test_vlseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_i8m2x3_mu(mask, maskedoff_tuple, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_i8m2x3_mu(vm, vd, rs1, vl);
 }

-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf8x3_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf8x3_mu(vm, vd, rs1, vl);
 }

-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf4x3_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf4x3_mu(vm, vd, rs1, vl);
 }

-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8mf2x3_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8mf2x3_mu(vm, vd, rs1, vl);
 }

-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m1x3_mu(mask, maskedoff_tuple, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m1x3_mu(vm, vd, rs1, vl);
 }

-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_v_u8m2x3_mu(mask, maskedoff_tuple, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_v_u8m2x3_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8\.[ivxfswum.]+\s+} 40 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8ff.c
index 285ad7c7e..189dbedb5 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg3e8ff.c
@@ -6,164 +6,164 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf8x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf8x3_tu(vd, rs1, new_vl, vl);
 }

-vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf4x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf4x3_tu(vd, rs1, new_vl, vl);
 }

-vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf2x3_tu(vd, rs1, new_vl, vl);
 }

-vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8m1x3_tu(vd, rs1, new_vl, vl);
 }

-vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8m2x3_tu(vd, rs1, new_vl, vl);
 }

-vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8mf8x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8mf8x3_tu(vd, rs1, new_vl, vl);
 }

-vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8mf4x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8mf4x3_tu(vd, rs1, new_vl, vl);
 }

-vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8mf2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8mf2x3_tu(vd, rs1, new_vl, vl);
 }

-vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8m1x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8m1x3_tu(vd, rs1, new_vl, vl);
 }

-vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8m2x3_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8m2x3_tu(vd, rs1, new_vl, vl);
 }

-vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf8x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf8x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf4x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf4x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8mf8x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8mf8x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8mf4x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8mf4x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8mf2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8mf2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8m1x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8m1x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_u8m2x3_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_u8m2x3_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf8x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf4x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8mf2x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e8ff_v_i8m1x3_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e8ff_v_i8m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf8x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m1x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m2x3_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf8x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf8x3_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf4x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const 
int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8m1x3_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_i8m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_i8m2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf8x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf8x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf4x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8mf2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m1x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m1x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_v_u8m2x3_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_v_u8m2x3_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8ff\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16.c index 290179bd6..544d7613f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf4x4_tu(maskedoff_tuple, base, vl); +vfloat16mf4x4_t 
test_vlseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf4x4_tu(vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf2x4_tu(maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf2x4_tu(vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m1x4_tu(maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m1x4_tu(vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m2x4_tu(maskedoff_tuple, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m2x4_tu(vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf4x4_tu(maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf4x4_tu(vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf2x4_tu(maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf2x4_tu(vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m1x4_tu(maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m1x4_tu(vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m2x4_tu(maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m2x4_tu(vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf4x4_tu(maskedoff_tuple, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf4x4_tu(vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf2x4_tu(maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf2x4_tu(vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m1x4_tu(maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m1x4_tu(vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, 
size_t vl) { - return __riscv_vlseg4e16_v_u16m2x4_tu(maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m2x4_tu(vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf4x4_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf4x4_tum(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf2x4_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf2x4_tum(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m1x4_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m1x4_tum(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m2x4_tum(mask, maskedoff_tuple, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m2x4_tum(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf4x4_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf4x4_tum(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf2x4_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf2x4_tum(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m1x4_tum(mask, maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m1x4_tum(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m2x4_tum(mask, maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m2x4_tum(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf4x4_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { 
+ return __riscv_vlseg4e16_v_u16mf4x4_tum(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf2x4_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf2x4_tum(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m1x4_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m1x4_tum(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m2x4_tum(mask, maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m2x4_tum(vm, vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf4x4_tumu(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf2x4_tumu(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m1x4_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m1x4_tumu(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m2x4_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m2x4_tumu(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf4x4_tumu(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf2x4_tumu(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t 
maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m1x4_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m1x4_tumu(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m2x4_tumu(mask, maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m2x4_tumu(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf4x4_tumu(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf2x4_tumu(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m1x4_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m1x4_tumu(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m2x4_tumu(mask, maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m2x4_tumu(vm, vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf4x4_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf4x4_mu(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16mf2x4_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16mf2x4_mu(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m1x4_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m1x4_mu(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_f16m2x4_mu(mask, maskedoff_tuple, base, vl); +vfloat16m2x4_t 
test_vlseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_f16m2x4_mu(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf4x4_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf4x4_mu(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16mf2x4_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16mf2x4_mu(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m1x4_mu(mask, maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m1x4_mu(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_i16m2x4_mu(mask, maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_i16m2x4_mu(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf4x4_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf4x4_mu(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16mf2x4_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16mf2x4_mu(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m1x4_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m1x4_mu(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_v_u16m2x4_mu(mask, maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_v_u16m2x4_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16ff.c index 60edc7834..db791f947 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e16ff.c @@ -6,196 
+6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf4x4_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf4x4_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf2x4_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf2x4_tu(vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m1x4_tu(vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m2x4_tu(vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf4x4_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf4x4_tu(vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf2x4_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf2x4_tu(vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m1x4_tu(vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m2x4_tu(vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf4x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg4e16ff_v_u16mf4x4_tu(vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf2x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf2x4_tu(vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m1x4_tu(vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m2x4_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf4x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m1x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m2x4_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf4x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg4e16ff_v_i16mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m1x4_tum(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf4x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m1x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t 
test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t 
*rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_u16m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_u16m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf4x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m1x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_f16m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_f16m2x4_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf4x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_v_i16mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_i16mf2x4_mu(vm, vd, rs1, new_vl, vl); 
 }
-vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff_v_i16m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_i16m1x4_mu(vm, vd, rs1, new_vl, vl);
 }
-vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff_v_i16m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_i16m2x4_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff_v_u16mf4x4_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_u16mf4x4_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff_v_u16mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_u16mf2x4_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff_v_u16m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_u16m1x4_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e16ff_v_u16m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_u16m2x4_mu(vm, vd, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16ff\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32.c
index 4ac1a3735..e479f806e 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32mf2x4_tu(maskedoff_tuple, base, vl);
+vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32mf2x4_tu(vd, rs1, vl);
 }
-vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m1x4_tu(maskedoff_tuple, base, vl);
+vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m1x4_tu(vd, rs1, vl);
 }
-vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m2x4_tu(maskedoff_tuple, base, vl);
+vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m2x4_tu(vd, rs1, vl);
 }
-vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32mf2x4_tu(maskedoff_tuple, base, vl);
+vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32mf2x4_tu(vd, rs1, vl);
 }
-vint32m1x4_t test_vlseg4e32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m1x4_tu(maskedoff_tuple, base, vl);
+vint32m1x4_t test_vlseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m1x4_tu(vd, rs1, vl);
 }
-vint32m2x4_t test_vlseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m2x4_tu(maskedoff_tuple, base, vl);
+vint32m2x4_t test_vlseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m2x4_tu(vd, rs1, vl);
 }
-vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32mf2x4_tu(maskedoff_tuple, base, vl);
+vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32mf2x4_tu(vd, rs1, vl);
 }
-vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m1x4_tu(maskedoff_tuple, base, vl);
+vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m1x4_tu(vd, rs1, vl);
 }
-vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m2x4_tu(maskedoff_tuple, base, vl);
+vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m2x4_tu(vd, rs1, vl);
 }
-vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32mf2x4_tum(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32mf2x4_tum(vm, vd, rs1, vl);
 }
-vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m1x4_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m1x4_tum(vm, vd, rs1, vl);
 }
-vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m2x4_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m2x4_tum(vm, vd, rs1, vl);
 }
-vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32mf2x4_tum(mask, maskedoff_tuple, base, vl);
+vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32mf2x4_tum(vm, vd, rs1, vl);
 }
-vint32m1x4_t test_vlseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m1x4_tum(mask, maskedoff_tuple, base, vl);
+vint32m1x4_t test_vlseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m1x4_tum(vm, vd, rs1, vl);
 }
-vint32m2x4_t test_vlseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m2x4_tum(mask, maskedoff_tuple, base, vl);
+vint32m2x4_t test_vlseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m2x4_tum(vm, vd, rs1, vl);
 }
-vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32mf2x4_tum(mask, maskedoff_tuple, base, vl);
+vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32mf2x4_tum(vm, vd, rs1, vl);
 }
-vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m1x4_tum(mask, maskedoff_tuple, base, vl);
+vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m1x4_tum(vm, vd, rs1, vl);
 }
-vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m2x4_tum(mask, maskedoff_tuple, base, vl);
+vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m2x4_tum(vm, vd, rs1, vl);
 }
-vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32mf2x4_tumu(vm, vd, rs1, vl);
 }
-vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m1x4_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m1x4_tumu(vm, vd, rs1, vl);
 }
-vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m2x4_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m2x4_tumu(vm, vd, rs1, vl);
 }
-vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, vl);
+vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32mf2x4_tumu(vm, vd, rs1, vl);
 }
-vint32m1x4_t test_vlseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m1x4_tumu(mask, maskedoff_tuple, base, vl);
+vint32m1x4_t test_vlseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m1x4_tumu(vm, vd, rs1, vl);
 }
-vint32m2x4_t test_vlseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m2x4_tumu(mask, maskedoff_tuple, base, vl);
+vint32m2x4_t test_vlseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m2x4_tumu(vm, vd, rs1, vl);
 }
-vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32mf2x4_tumu(vm, vd, rs1, vl);
 }
-vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m1x4_tumu(mask, maskedoff_tuple, base, vl);
+vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m1x4_tumu(vm, vd, rs1, vl);
 }
-vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m2x4_tumu(mask, maskedoff_tuple, base, vl);
+vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m2x4_tumu(vm, vd, rs1, vl);
 }
-vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32mf2x4_mu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32mf2x4_mu(vm, vd, rs1, vl);
 }
-vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m1x4_mu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m1x4_mu(vm, vd, rs1, vl);
 }
-vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_f32m2x4_mu(mask, maskedoff_tuple, base, vl);
+vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_f32m2x4_mu(vm, vd, rs1, vl);
 }
-vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32mf2x4_mu(mask, maskedoff_tuple, base, vl);
+vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32mf2x4_mu(vm, vd, rs1, vl);
 }
-vint32m1x4_t test_vlseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m1x4_mu(mask, maskedoff_tuple, base, vl);
+vint32m1x4_t test_vlseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m1x4_mu(vm, vd, rs1, vl);
 }
-vint32m2x4_t test_vlseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_i32m2x4_mu(mask, maskedoff_tuple, base, vl);
+vint32m2x4_t test_vlseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_i32m2x4_mu(vm, vd, rs1, vl);
 }
-vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32mf2x4_mu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32mf2x4_mu(vm, vd, rs1, vl);
 }
-vuint32m1x4_t test_vlseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m1x4_mu(mask, maskedoff_tuple, base, vl);
+vuint32m1x4_t test_vlseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m1x4_mu(vm, vd, rs1, vl);
 }
-vuint32m2x4_t test_vlseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg4e32_v_u32m2x4_mu(mask, maskedoff_tuple, base, vl);
+vuint32m2x4_t test_vlseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg4e32_v_u32m2x4_mu(vm, vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32ff.c
index 05ffdbd89..d9b1aac01 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e32ff.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32mf2x4_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_f32mf2x4_tu(vd, rs1, new_vl, vl);
 }
-vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32m1x4_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_f32m1x4_tu(vd, rs1, new_vl, vl);
 }
-vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32m2x4_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_f32m2x4_tu(vd, rs1, new_vl, vl);
 }
-vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_i32mf2x4_tu(maskedoff_tuple, base, new_vl, vl);
+vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_i32mf2x4_tu(vd, rs1, new_vl, vl);
 }
-vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_i32m1x4_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_i32m1x4_tu(vd, rs1, new_vl, vl);
 }
-vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_i32m2x4_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_i32m2x4_tu(vd, rs1, new_vl, vl);
 }
-vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_u32mf2x4_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_u32mf2x4_tu(vd, rs1, new_vl, vl);
 }
-vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_u32m1x4_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_u32m1x4_tu(vd, rs1, new_vl, vl);
 }
-vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_u32m2x4_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_u32m2x4_tu(vd, rs1, new_vl, vl);
 }
-vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_f32mf2x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_f32m1x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_f32m2x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_i32mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_i32mf2x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_i32m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_i32m1x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_i32m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_i32m2x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_u32mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_u32mf2x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_u32m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_u32m1x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_u32m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_u32m2x4_tum(vm, vd, rs1, new_vl, vl);
 }
-vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e32ff_v_f32mf2x4_tumu(vm, vd, rs1, new_vl, vl);
 }
-vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e32ff_v_f32m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t vm,
vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg4e32ff_v_f32mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m1x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_f32m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_f32m2x4_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m1x4_mu(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_i32m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_i32m2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m1x4_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_v_u32m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_v_u32m2x4_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64.c index 8720d2bee..721d45c41 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m1x4_tu(maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m1x4_tu(vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m2x4_tu(maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m2x4_tu(vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m1x4_tu(maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m1x4_tu(vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m2x4_tu(maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m2x4_tu(vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m1x4_tu(maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m1x4_tu(vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m2x4_tu(maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m2x4_tu(vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m1x4_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m1x4_tum(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m2x4_tum(mask, maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m2x4_tum(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m1x4_tum(mask, maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return 
__riscv_vlseg4e64_v_i64m1x4_tum(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m2x4_tum(mask, maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m2x4_tum(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m1x4_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m1x4_tum(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m2x4_tum(mask, maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m2x4_tum(vm, vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m1x4_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m1x4_tumu(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m2x4_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m2x4_tumu(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m1x4_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m1x4_tumu(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m2x4_tumu(mask, maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m2x4_tumu(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m1x4_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m1x4_tumu(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m2x4_tumu(mask, maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m2x4_tumu(vm, vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return 
__riscv_vlseg4e64_v_f64m1x4_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m1x4_mu(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_f64m2x4_mu(mask, maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_f64m2x4_mu(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m1x4_mu(mask, maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m1x4_mu(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_i64m2x4_mu(mask, maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_i64m2x4_mu(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m1x4_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m1x4_mu(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_v_u64m2x4_mu(mask, maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_v_u64m2x4_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64ff.c index 2c69834ae..91a917c49 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e64ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m1x4_tu(vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m2x4_tu(vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg4e64ff_v_i64m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m1x4_tu(vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m2x4_tu(vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m1x4_tu(vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m2x4_tu(vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m1x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m2x4_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m1x4_tum(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m1x4_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t 
test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m1x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, 
size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_f64m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_f64m2x4_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m1x4_mu(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_i64m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_i64m2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m1x4_mu(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_v_u64m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_v_u64m2x4_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8.c index e929f1099..21d9df99f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8.c @@ -6,164 +6,164 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf8x4_tu(maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf8x4_tu(vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf4x4_tu(maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf4x4_tu(vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf2x4_tu(maskedoff_tuple, base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf2x4_tu(vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t 
*base, size_t vl) { - return __riscv_vlseg4e8_v_i8m1x4_tu(maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m1x4_tu(vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m2x4_tu(maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m2x4_tu(vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf8x4_tu(maskedoff_tuple, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf8x4_tu(vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf4x4_tu(maskedoff_tuple, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf4x4_tu(vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf2x4_tu(maskedoff_tuple, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf2x4_tu(vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m1x4_tu(maskedoff_tuple, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m1x4_tu(vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m2x4_tu(maskedoff_tuple, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m2x4_tu(vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf8x4_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf8x4_tum(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf4x4_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf4x4_tum(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf2x4_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf2x4_tum(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m1x4_tum(mask, maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m1x4_tum(vm, vd, rs1, 
vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m2x4_tum(mask, maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m2x4_tum(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf8x4_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf8x4_tum(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf4x4_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf4x4_tum(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf2x4_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf2x4_tum(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m1x4_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m1x4_tum(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m2x4_tum(mask, maskedoff_tuple, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m2x4_tum(vm, vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf8x4_tumu(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf4x4_tumu(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf2x4_tumu(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m1x4_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + 
return __riscv_vlseg4e8_v_i8m1x4_tumu(vm, vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m2x4_tumu(mask, maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m2x4_tumu(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf8x4_tumu(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf4x4_tumu(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf2x4_tumu(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m1x4_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m1x4_tumu(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m2x4_tumu(mask, maskedoff_tuple, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m2x4_tumu(vm, vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf8x4_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf8x4_mu(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf4x4_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf4x4_mu(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8mf2x4_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8mf2x4_mu(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m1x4_mu(mask, maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_mu(vbool8_t vm, 
vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m1x4_mu(vm, vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_i8m2x4_mu(mask, maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_i8m2x4_mu(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf8x4_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf8x4_mu(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf4x4_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf4x4_mu(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8mf2x4_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8mf2x4_mu(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m1x4_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m1x4_mu(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_v_u8m2x4_mu(mask, maskedoff_tuple, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_v_u8m2x4_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8ff.c index 8ee8f1d7d..28afadad1 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg4e8ff.c @@ -6,164 +6,164 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf8x4_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf8x4_tu(vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf4x4_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf4x4_tu(vd, rs1, new_vl, vl); } -vint8mf2x4_t 
test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf2x4_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf2x4_tu(vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m1x4_tu(vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m2x4_tu(vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf8x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf8x4_tu(vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf4x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf4x4_tu(vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf2x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf2x4_tu(vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m1x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m1x4_tu(vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m2x4_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m2x4_tu(vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf8x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf8x4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg4e8ff_v_i8mf4x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m1x4_tum(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf8x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf8x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf4x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m1x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m1x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m2x4_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg4e8ff_v_u8m2x4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf8x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf8x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) 
{ - return __riscv_vlseg4e8ff_v_u8m1x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m2x4_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf8x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf8x4_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf4x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m1x4_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_i8m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_i8m2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf8x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf8x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf4x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf4x4_mu(vm, vd, 
rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8mf2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m1x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m1x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_v_u8m2x4_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_v_u8m2x4_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8ff\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16.c index 49a4a96b4..79e8d0b68 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf4x5_tu(maskedoff_tuple, base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf4x5_tu(vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf2x5_tu(maskedoff_tuple, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf2x5_tu(vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16m1x5_tu(maskedoff_tuple, base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16m1x5_tu(vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf4x5_tu(maskedoff_tuple, base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf4x5_tu(vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf2x5_tu(maskedoff_tuple, base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf2x5_tu(vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t 
*base, size_t vl) { - return __riscv_vlseg5e16_v_i16m1x5_tu(maskedoff_tuple, base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16m1x5_tu(vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf4x5_tu(maskedoff_tuple, base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf4x5_tu(vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf2x5_tu(maskedoff_tuple, base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf2x5_tu(vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16m1x5_tu(maskedoff_tuple, base, vl); +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16m1x5_tu(vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf4x5_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf4x5_tum(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf2x5_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf2x5_tum(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16m1x5_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16m1x5_tum(vm, vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf4x5_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf4x5_tum(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf2x5_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf2x5_tum(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16m1x5_tum(mask, maskedoff_tuple, base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16m1x5_tum(vm, vd, rs1, vl); } -vuint16mf4x5_t 
test_vlseg5e16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf4x5_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf4x5_tum(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf2x5_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf2x5_tum(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16m1x5_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16m1x5_tum(vm, vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf4x5_tumu(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf2x5_tumu(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16m1x5_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16m1x5_tumu(vm, vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf4x5_tumu(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf2x5_tumu(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16m1x5_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16m1x5_tumu(vm, vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return 
__riscv_vlseg5e16_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf4x5_tumu(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf2x5_tumu(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16m1x5_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16m1x5_tumu(vm, vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf4x5_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf4x5_mu(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16mf2x5_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16mf2x5_mu(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_f16m1x5_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_f16m1x5_mu(vm, vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf4x5_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf4x5_mu(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16mf2x5_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16mf2x5_mu(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_i16m1x5_mu(mask, maskedoff_tuple, base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_i16m1x5_mu(vm, vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf4x5_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t 
vl) { + return __riscv_vlseg5e16_v_u16mf4x5_mu(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16mf2x5_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16mf2x5_mu(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_v_u16m1x5_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_v_u16m1x5_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16ff.c index dd620f693..fa25eafb0 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e16ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf4x5_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf4x5_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf2x5_tu(vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16m1x5_tu(vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf4x5_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf4x5_tu(vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf2x5_tu(vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x5_t 
test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16m1x5_tu(vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf4x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf4x5_tu(vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf2x5_tu(vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16m1x5_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf4x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf4x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t 
test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf4x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16m1x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t 
maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf4x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_f16m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_f16m1x5_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return 
__riscv_vlseg5e16ff_v_i16mf4x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_i16m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_i16m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf4x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_v_u16m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_u16m1x5_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32.c index 4529e6514..9cc732ccb 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32mf2x5_tu(maskedoff_tuple, base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32mf2x5_tu(vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32m1x5_tu(maskedoff_tuple, base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, size_t 
vl) { + return __riscv_vlseg5e32_v_f32m1x5_tu(vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32mf2x5_tu(maskedoff_tuple, base, vl); +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32mf2x5_tu(vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32m1x5_tu(maskedoff_tuple, base, vl); +vint32m1x5_t test_vlseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32m1x5_tu(vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32mf2x5_tu(maskedoff_tuple, base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32mf2x5_tu(vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32m1x5_tu(maskedoff_tuple, base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32m1x5_tu(vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32mf2x5_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32mf2x5_tum(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32m1x5_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32m1x5_tum(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32mf2x5_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32mf2x5_tum(vm, vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32m1x5_tum(mask, maskedoff_tuple, base, vl); +vint32m1x5_t test_vlseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32m1x5_tum(vm, vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32mf2x5_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32mf2x5_tum(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32m1x5_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t 
*rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32m1x5_tum(vm, vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32mf2x5_tumu(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32m1x5_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32m1x5_tumu(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32mf2x5_tumu(vm, vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32m1x5_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x5_t test_vlseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32m1x5_tumu(vm, vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32mf2x5_tumu(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32m1x5_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32m1x5_tumu(vm, vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32mf2x5_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32mf2x5_mu(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_f32m1x5_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_f32m1x5_mu(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32mf2x5_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32mf2x5_mu(vm, vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_mu(vbool32_t mask, 
vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_i32m1x5_mu(mask, maskedoff_tuple, base, vl); +vint32m1x5_t test_vlseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_i32m1x5_mu(vm, vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32mf2x5_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32mf2x5_mu(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_v_u32m1x5_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_v_u32m1x5_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32ff.c index 37b686d67..a1d7d520a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e32ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32mf2x5_tu(vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32m1x5_tu(vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32mf2x5_tu(vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32m1x5_tu(vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32mf2x5_tu(vd, rs1, new_vl, vl); } -vuint32m1x5_t 
test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32m1x5_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32m1x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { 
- return __riscv_vlseg5e32ff_v_f32m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_f32m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_f32m1x5_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_i32m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x5_t 
test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_i32m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_v_u32m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_v_u32m1x5_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64.c index 8101708a6..63c1105a1 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_f64m1x5_tu(maskedoff_tuple, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_f64m1x5_tu(vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_i64m1x5_tu(maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_i64m1x5_tu(vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_u64m1x5_tu(maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_u64m1x5_tu(vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_f64m1x5_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_f64m1x5_tum(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_i64m1x5_tum(mask, maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_i64m1x5_tum(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_u64m1x5_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tum(vbool64_t vm, 
vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_u64m1x5_tum(vm, vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_f64m1x5_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_f64m1x5_tumu(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_i64m1x5_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_i64m1x5_tumu(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_u64m1x5_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_u64m1x5_tumu(vm, vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_f64m1x5_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_f64m1x5_mu(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_i64m1x5_mu(mask, maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_i64m1x5_mu(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_v_u64m1x5_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_v_u64m1x5_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64ff.c index ad583eb85..0aae78280 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_f64m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_f64m1x5_tu(vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_i64m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, 
size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_i64m1x5_tu(vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_u64m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_u64m1x5_tu(vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_f64m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_f64m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_i64m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_i64m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_u64m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_u64m1x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_f64m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_f64m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_i64m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_i64m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_u64m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_u64m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_f64m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_f64m1x5_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t mask, 
vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_i64m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_i64m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_v_u64m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_v_u64m1x5_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8.c index 1c2ee4605..9ba925f23 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf8x5_tu(maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf8x5_tu(vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf4x5_tu(maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf4x5_tu(vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf2x5_tu(maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf2x5_tu(vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8m1x5_tu(maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8m1x5_tu(vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf8x5_tu(maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf8x5_tu(vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf4x5_tu(maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf4x5_tu(vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf2x5_tu(maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf2x5_tu(vd, rs1, vl); } -vuint8m1x5_t 
test_vlseg5e8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8m1x5_tu(maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8m1x5_tu(vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf8x5_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf8x5_tum(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf4x5_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf4x5_tum(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf2x5_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf2x5_tum(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8m1x5_tum(mask, maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8m1x5_tum(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf8x5_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf8x5_tum(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf4x5_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf4x5_tum(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf2x5_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf2x5_tum(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8m1x5_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8m1x5_tum(vm, vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf8x5_tumu(vm, vd, rs1, vl); } 
-vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf4x5_tumu(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf2x5_tumu(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8m1x5_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8m1x5_tumu(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf8x5_tumu(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf4x5_tumu(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf2x5_tumu(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8m1x5_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8m1x5_tumu(vm, vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf8x5_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf8x5_mu(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf4x5_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8mf4x5_mu(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8mf2x5_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t 
vl) { + return __riscv_vlseg5e8_v_i8mf2x5_mu(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_i8m1x5_mu(mask, maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_i8m1x5_mu(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf8x5_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf8x5_mu(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf4x5_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf4x5_mu(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8mf2x5_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8mf2x5_mu(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_v_u8m1x5_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_v_u8m1x5_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8ff.c index fa159987c..1a79d912a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg5e8ff.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf8x5_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf8x5_tu(vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf4x5_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf4x5_tu(vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf2x5_tu(vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t 
maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8m1x5_tu(vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf8x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf8x5_tu(vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf4x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf4x5_tu(vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf2x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf2x5_tu(vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8m1x5_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8m1x5_tu(vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf8x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf8x5_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf4x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t 
maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf8x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf8x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf4x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf2x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8m1x5_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf8x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t 
test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf8x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8m1x5_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf8x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf8x5_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf4x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_i8m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_i8m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf8x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf8x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t 
test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf4x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8mf2x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_v_u8m1x5_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_v_u8m1x5_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8ff\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16.c index 89ac864bd..c1f09786e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf4x6_tu(maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf4x6_tu(vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf2x6_tu(maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf2x6_tu(vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16m1x6_tu(maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16m1x6_tu(vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf4x6_tu(maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf4x6_tu(vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf2x6_tu(maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf2x6_tu(vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return 
__riscv_vlseg6e16_v_i16m1x6_tu(maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16m1x6_tu(vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf4x6_tu(maskedoff_tuple, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf4x6_tu(vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf2x6_tu(maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf2x6_tu(vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16m1x6_tu(maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16m1x6_tu(vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf4x6_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf4x6_tum(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf2x6_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf2x6_tum(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16m1x6_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16m1x6_tum(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf4x6_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf4x6_tum(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf2x6_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf2x6_tum(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16m1x6_tum(mask, maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16m1x6_tum(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t 
maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf4x6_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf4x6_tum(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf2x6_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf2x6_tum(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16m1x6_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16m1x6_tum(vm, vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf4x6_tumu(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf2x6_tumu(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16m1x6_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16m1x6_tumu(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf4x6_tumu(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf2x6_tumu(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16m1x6_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16m1x6_tumu(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, vl); 
+vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf4x6_tumu(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf2x6_tumu(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16m1x6_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16m1x6_tumu(vm, vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf4x6_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf4x6_mu(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16mf2x6_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16mf2x6_mu(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_f16m1x6_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_f16m1x6_mu(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf4x6_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf4x6_mu(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16mf2x6_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16mf2x6_mu(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_i16m1x6_mu(mask, maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_i16m1x6_mu(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf4x6_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf4x6_mu(vm, vd, rs1, vl); } 
-vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16mf2x6_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16mf2x6_mu(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_v_u16m1x6_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_v_u16m1x6_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16ff.c index 43d43922f..c4478ba03 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e16ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf4x6_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf4x6_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf2x6_tu(vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16m1x6_tu(vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf4x6_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf4x6_tu(vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf2x6_tu(vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + 
return __riscv_vlseg6e16ff_v_i16m1x6_tu(vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf4x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf4x6_tu(vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf2x6_tu(vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16m1x6_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf4x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf4x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - 
return __riscv_vlseg6e16ff_v_i16m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf4x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16m1x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf2x6_tumu(mask, 
maskedoff_tuple, base, new_vl, vl); +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf4x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_f16m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_f16m1x6_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf4x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x6_t 
test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_i16m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_i16m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf4x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_v_u16m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_u16m1x6_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32.c index 1c10f6139..5c492370a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32mf2x6_tu(maskedoff_tuple, base, vl); +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32mf2x6_tu(vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32m1x6_tu(maskedoff_tuple, base, vl); +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32m1x6_tu(vd, rs1, vl); } -vint32mf2x6_t 
test_vlseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32mf2x6_tu(maskedoff_tuple, base, vl); +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32mf2x6_tu(vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32m1x6_tu(maskedoff_tuple, base, vl); +vint32m1x6_t test_vlseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32m1x6_tu(vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32mf2x6_tu(maskedoff_tuple, base, vl); +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32mf2x6_tu(vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32m1x6_tu(maskedoff_tuple, base, vl); +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32m1x6_tu(vd, rs1, vl); } -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32mf2x6_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32mf2x6_tum(vm, vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32m1x6_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32m1x6_tum(vm, vd, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32mf2x6_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32mf2x6_tum(vm, vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32m1x6_tum(mask, maskedoff_tuple, base, vl); +vint32m1x6_t test_vlseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32m1x6_tum(vm, vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32mf2x6_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32mf2x6_tum(vm, vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32m1x6_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32m1x6_tum(vm, vd, rs1, vl); 
} -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32mf2x6_tumu(vm, vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32m1x6_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32m1x6_tumu(vm, vd, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32mf2x6_tumu(vm, vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32m1x6_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x6_t test_vlseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32m1x6_tumu(vm, vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32mf2x6_tumu(vm, vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32m1x6_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32m1x6_tumu(vm, vd, rs1, vl); } -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32mf2x6_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32mf2x6_mu(vm, vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_f32m1x6_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_f32m1x6_mu(vm, vd, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_i32mf2x6_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32mf2x6_mu(vm, vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) { - return 
__riscv_vlseg6e32_v_i32m1x6_mu(mask, maskedoff_tuple, base, vl); +vint32m1x6_t test_vlseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_i32m1x6_mu(vm, vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32mf2x6_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32mf2x6_mu(vm, vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg6e32_v_u32m1x6_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg6e32_v_u32m1x6_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32ff.c index 17fda1d6c..73b43b580 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e32ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32mf2x6_tu(vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32m1x6_tu(vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32mf2x6_tu(vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32m1x6_tu(vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32mf2x6_tu(vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32m1x6_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32m1x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32m1x6_tumu(mask, maskedoff_tuple, base, 
new_vl, vl); +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_f32m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_f32m1x6_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_i32m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const 
int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_i32m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e32ff_v_u32m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e32ff_v_u32m1x6_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64.c index 62fad8dba..d5eba301e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_f64m1x6_tu(maskedoff_tuple, base, vl); +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_f64m1x6_tu(vd, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_i64m1x6_tu(maskedoff_tuple, base, vl); +vint64m1x6_t test_vlseg6e64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_i64m1x6_tu(vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_u64m1x6_tu(maskedoff_tuple, base, vl); +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_u64m1x6_tu(vd, rs1, vl); } -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_f64m1x6_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_f64m1x6_tum(vm, vd, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_i64m1x6_tum(mask, maskedoff_tuple, base, vl); +vint64m1x6_t test_vlseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_i64m1x6_tum(vm, vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_u64m1x6_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { + return 
__riscv_vlseg6e64_v_u64m1x6_tum(vm, vd, rs1, vl); } -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_f64m1x6_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_f64m1x6_tumu(vm, vd, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_i64m1x6_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x6_t test_vlseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_i64m1x6_tumu(vm, vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_u64m1x6_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_u64m1x6_tumu(vm, vd, rs1, vl); } -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_f64m1x6_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_f64m1x6_mu(vm, vd, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_i64m1x6_mu(mask, maskedoff_tuple, base, vl); +vint64m1x6_t test_vlseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_i64m1x6_mu(vm, vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg6e64_v_u64m1x6_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg6e64_v_u64m1x6_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64ff.c index 4e9a822e7..db789872a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_f64m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_f64m1x6_tu(vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_i64m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg6e64ff_v_i64m1x6_tu(vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_u64m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_u64m1x6_tu(vd, rs1, new_vl, vl); } -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_f64m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_f64m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_i64m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_i64m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_u64m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_u64m1x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_f64m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_f64m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_i64m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_i64m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_u64m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_u64m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_f64m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_f64m1x6_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_i64m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_i64m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e64ff_v_u64m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e64ff_v_u64m1x6_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8.c index 05a260830..94bbf3bc2 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf8x6_tu(maskedoff_tuple, base, vl); +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf8x6_tu(vd, rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf4x6_tu(maskedoff_tuple, base, vl); +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf4x6_tu(vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf2x6_tu(maskedoff_tuple, base, vl); +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf2x6_tu(vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8m1x6_tu(maskedoff_tuple, base, vl); +vint8m1x6_t test_vlseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8m1x6_tu(vd, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf8x6_tu(maskedoff_tuple, base, vl); +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf8x6_tu(vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf4x6_tu(maskedoff_tuple, base, vl); +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf4x6_tu(vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf2x6_tu(maskedoff_tuple, base, vl); +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf2x6_tu(vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tu(vuint8m1x6_t 
maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8m1x6_tu(maskedoff_tuple, base, vl); +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8m1x6_tu(vd, rs1, vl); } -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf8x6_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf8x6_tum(vm, vd, rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf4x6_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf4x6_tum(vm, vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf2x6_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf2x6_tum(vm, vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8m1x6_tum(mask, maskedoff_tuple, base, vl); +vint8m1x6_t test_vlseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8m1x6_tum(vm, vd, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf8x6_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf8x6_tum(vm, vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf4x6_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf4x6_tum(vm, vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf2x6_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf2x6_tum(vm, vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8m1x6_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8m1x6_tum(vm, vd, rs1, vl); } -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf8x6_tumu(vm, vd, rs1, vl); } -vint8mf4x6_t 
test_vlseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf4x6_tumu(vm, vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf2x6_tumu(vm, vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8m1x6_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x6_t test_vlseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8m1x6_tumu(vm, vd, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf8x6_tumu(vm, vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf4x6_tumu(vm, vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf2x6_tumu(vm, vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8m1x6_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8m1x6_tumu(vm, vd, rs1, vl); } -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf8x6_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf8x6_mu(vm, vd, rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf4x6_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8mf4x6_mu(vm, vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8mf2x6_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { + 
return __riscv_vlseg6e8_v_i8mf2x6_mu(vm, vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_i8m1x6_mu(mask, maskedoff_tuple, base, vl); +vint8m1x6_t test_vlseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_i8m1x6_mu(vm, vd, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf8x6_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf8x6_mu(vm, vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf4x6_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf4x6_mu(vm, vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8mf2x6_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8mf2x6_mu(vm, vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg6e8_v_u8m1x6_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg6e8_v_u8m1x6_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8ff.c index 2a660ef90..e7bf5ec75 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg6e8ff.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf8x6_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf8x6_tu(vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf4x6_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf4x6_tu(vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf2x6_tu(vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t 
maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8m1x6_tu(vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf8x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf8x6_tu(vd, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf4x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf4x6_tu(vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf2x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf2x6_tu(vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8m1x6_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8m1x6_tu(vd, rs1, new_vl, vl); } -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf8x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf8x6_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf4x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t 
maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf8x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf8x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf4x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf2x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8m1x6_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf8x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x6_t 
test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf8x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8m1x6_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf8x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf8x6_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf4x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_i8m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_i8m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf8x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf8x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x6_t 
test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf4x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8mf2x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e8ff_v_u8m1x6_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e8ff_v_u8m1x6_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8ff\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16.c index a05df9865..f7dc685a0 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf4x7_tu(maskedoff_tuple, base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf4x7_tu(vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf2x7_tu(maskedoff_tuple, base, vl); +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf2x7_tu(vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16m1x7_tu(maskedoff_tuple, base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16m1x7_tu(vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf4x7_tu(maskedoff_tuple, base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf4x7_tu(vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf2x7_tu(maskedoff_tuple, base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf2x7_tu(vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return 
__riscv_vlseg7e16_v_i16m1x7_tu(maskedoff_tuple, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16m1x7_tu(vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf4x7_tu(maskedoff_tuple, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf4x7_tu(vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf2x7_tu(maskedoff_tuple, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf2x7_tu(vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16m1x7_tu(maskedoff_tuple, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16m1x7_tu(vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf4x7_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf4x7_tum(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf2x7_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf2x7_tum(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16m1x7_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16m1x7_tum(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf4x7_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf4x7_tum(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf2x7_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf2x7_tum(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16m1x7_tum(mask, maskedoff_tuple, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16m1x7_tum(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t 
maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf4x7_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf4x7_tum(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf2x7_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf2x7_tum(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16m1x7_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16m1x7_tum(vm, vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf4x7_tumu(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf2x7_tumu(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16m1x7_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16m1x7_tumu(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf4x7_tumu(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf2x7_tumu(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16m1x7_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16m1x7_tumu(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, vl); 
+vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf4x7_tumu(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf2x7_tumu(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16m1x7_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16m1x7_tumu(vm, vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf4x7_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf4x7_mu(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16mf2x7_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16mf2x7_mu(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_f16m1x7_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_f16m1x7_mu(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf4x7_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf4x7_mu(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16mf2x7_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16mf2x7_mu(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_i16m1x7_mu(mask, maskedoff_tuple, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_i16m1x7_mu(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf4x7_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf4x7_mu(vm, vd, rs1, vl); } 
-vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16mf2x7_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16mf2x7_mu(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_v_u16m1x7_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_v_u16m1x7_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16ff.c index d1bd43d6d..02d6dc230 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e16ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf4x7_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf4x7_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf2x7_tu(vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16m1x7_tu(vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf4x7_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf4x7_tu(vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf2x7_tu(vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + 
return __riscv_vlseg7e16ff_v_i16m1x7_tu(vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf4x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf4x7_tu(vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf2x7_tu(vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16m1x7_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf4x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf4x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - 
return __riscv_vlseg7e16ff_v_i16m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf4x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16m1x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf2x7_tumu(mask, 
maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf4x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_f16m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_f16m1x7_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf4x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t 
test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_i16m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_i16m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf4x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_v_u16m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_u16m1x7_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32.c index 26c681a3e..b13bda8ff 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32mf2x7_tu(maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32mf2x7_tu(vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32m1x7_tu(maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32m1x7_tu(vd, rs1, vl); } -vint32mf2x7_t 
test_vlseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32mf2x7_tu(maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32mf2x7_tu(vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32m1x7_tu(maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32m1x7_tu(vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32mf2x7_tu(maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32mf2x7_tu(vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32m1x7_tu(maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32m1x7_tu(vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32mf2x7_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32mf2x7_tum(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32m1x7_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32m1x7_tum(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32mf2x7_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32mf2x7_tum(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32m1x7_tum(mask, maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32m1x7_tum(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32mf2x7_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32mf2x7_tum(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32m1x7_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32m1x7_tum(vm, vd, rs1, vl); 
} -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32mf2x7_tumu(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32m1x7_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32m1x7_tumu(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32mf2x7_tumu(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32m1x7_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32m1x7_tumu(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32mf2x7_tumu(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32m1x7_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32m1x7_tumu(vm, vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32mf2x7_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32mf2x7_mu(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_f32m1x7_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_f32m1x7_mu(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_i32mf2x7_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32mf2x7_mu(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return 
__riscv_vlseg7e32_v_i32m1x7_mu(mask, maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_i32m1x7_mu(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32mf2x7_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32mf2x7_mu(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_v_u32m1x7_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_v_u32m1x7_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32ff.c index e759bf39b..9720abbab 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e32ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32mf2x7_tu(vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32m1x7_tu(vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32mf2x7_tu(vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32m1x7_tu(vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32mf2x7_tu(vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32m1x7_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32m1x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32m1x7_tumu(mask, maskedoff_tuple, base, 
new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_f32m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_f32m1x7_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_i32m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const 
int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_i32m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_v_u32m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_v_u32m1x7_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64.c index 94c105c74..0052b2908 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_f64m1x7_tu(maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_f64m1x7_tu(vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_i64m1x7_tu(maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_i64m1x7_tu(vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_u64m1x7_tu(maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_u64m1x7_tu(vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_f64m1x7_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_f64m1x7_tum(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_i64m1x7_tum(mask, maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_i64m1x7_tum(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_u64m1x7_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return 
__riscv_vlseg7e64_v_u64m1x7_tum(vm, vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_f64m1x7_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_f64m1x7_tumu(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_i64m1x7_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_i64m1x7_tumu(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_u64m1x7_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_u64m1x7_tumu(vm, vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_f64m1x7_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_f64m1x7_mu(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_i64m1x7_mu(mask, maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_i64m1x7_mu(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_v_u64m1x7_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_v_u64m1x7_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64ff.c index 7d41cda80..2f1ce5ecd 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_f64m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_f64m1x7_tu(vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_i64m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg7e64ff_v_i64m1x7_tu(vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_u64m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_u64m1x7_tu(vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_f64m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_f64m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_i64m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_i64m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_u64m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_u64m1x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_f64m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_f64m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_i64m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_i64m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_u64m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_u64m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_f64m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_f64m1x7_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_i64m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_i64m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_v_u64m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_v_u64m1x7_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8.c index b666734c7..3a6a6f13d 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf8x7_tu(maskedoff_tuple, base, vl); +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf8x7_tu(vd, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf4x7_tu(maskedoff_tuple, base, vl); +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf4x7_tu(vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf2x7_tu(maskedoff_tuple, base, vl); +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf2x7_tu(vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8m1x7_tu(maskedoff_tuple, base, vl); +vint8m1x7_t test_vlseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8m1x7_tu(vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf8x7_tu(maskedoff_tuple, base, vl); +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf8x7_tu(vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf4x7_tu(maskedoff_tuple, base, vl); +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf4x7_tu(vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf2x7_tu(maskedoff_tuple, base, vl); +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf2x7_tu(vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tu(vuint8m1x7_t 
maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8m1x7_tu(maskedoff_tuple, base, vl); +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8m1x7_tu(vd, rs1, vl); } -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf8x7_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf8x7_tum(vm, vd, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf4x7_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf4x7_tum(vm, vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf2x7_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf2x7_tum(vm, vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8m1x7_tum(mask, maskedoff_tuple, base, vl); +vint8m1x7_t test_vlseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8m1x7_tum(vm, vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf8x7_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf8x7_tum(vm, vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf4x7_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf4x7_tum(vm, vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf2x7_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf2x7_tum(vm, vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8m1x7_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8m1x7_tum(vm, vd, rs1, vl); } -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf8x7_tumu(vm, vd, rs1, vl); } -vint8mf4x7_t 
test_vlseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf4x7_tumu(vm, vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf2x7_tumu(vm, vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8m1x7_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x7_t test_vlseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8m1x7_tumu(vm, vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf8x7_tumu(vm, vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf4x7_tumu(vm, vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf2x7_tumu(vm, vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8m1x7_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8m1x7_tumu(vm, vd, rs1, vl); } -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf8x7_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf8x7_mu(vm, vd, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf4x7_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8mf4x7_mu(vm, vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8mf2x7_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { + 
return __riscv_vlseg7e8_v_i8mf2x7_mu(vm, vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_i8m1x7_mu(mask, maskedoff_tuple, base, vl); +vint8m1x7_t test_vlseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_i8m1x7_mu(vm, vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf8x7_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf8x7_mu(vm, vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf4x7_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf4x7_mu(vm, vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8mf2x7_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8mf2x7_mu(vm, vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg7e8_v_u8m1x7_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_v_u8m1x7_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8ff.c index c5f0f881e..3745a6b45 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg7e8ff.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf8x7_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf8x7_tu(vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf4x7_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf4x7_tu(vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf2x7_tu(vd, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t 
maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8m1x7_tu(vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf8x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf8x7_tu(vd, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf4x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf4x7_tu(vd, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf2x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf2x7_tu(vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8m1x7_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8m1x7_tu(vd, rs1, new_vl, vl); } -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf8x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf8x7_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf4x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t 
maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf8x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf8x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf4x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf2x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8m1x7_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf8x7_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x7_t 
test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf8x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8m1x7_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf8x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf8x7_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf4x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_i8m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_i8m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf8x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf8x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x7_t 
test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf4x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8mf2x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e8ff_v_u8m1x7_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e8ff_v_u8m1x7_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8ff\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16.c index 6483cc62c..168162f5c 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf4x8_tu(maskedoff_tuple, base, vl); +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf4x8_tu(vd, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf2x8_tu(maskedoff_tuple, base, vl); +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf2x8_tu(vd, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16m1x8_tu(maskedoff_tuple, base, vl); +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16m1x8_tu(vd, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf4x8_tu(maskedoff_tuple, base, vl); +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf4x8_tu(vd, rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf2x8_tu(maskedoff_tuple, base, vl); +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf2x8_tu(vd, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return 
__riscv_vlseg8e16_v_i16m1x8_tu(maskedoff_tuple, base, vl); +vint16m1x8_t test_vlseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16m1x8_tu(vd, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf4x8_tu(maskedoff_tuple, base, vl); +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf4x8_tu(vd, rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf2x8_tu(maskedoff_tuple, base, vl); +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf2x8_tu(vd, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16m1x8_tu(maskedoff_tuple, base, vl); +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16m1x8_tu(vd, rs1, vl); } -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf4x8_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf4x8_tum(vm, vd, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf2x8_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf2x8_tum(vm, vd, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16m1x8_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16m1x8_tum(vm, vd, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf4x8_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf4x8_tum(vm, vd, rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf2x8_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf2x8_tum(vm, vd, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16m1x8_tum(mask, maskedoff_tuple, base, vl); +vint16m1x8_t test_vlseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16m1x8_tum(vm, vd, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t 
maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf4x8_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf4x8_tum(vm, vd, rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf2x8_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf2x8_tum(vm, vd, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16m1x8_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16m1x8_tum(vm, vd, rs1, vl); } -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf4x8_tumu(vm, vd, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16mf2x8_tumu(vm, vd, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_f16m1x8_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_f16m1x8_tumu(vm, vd, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf4x8_tumu(vm, vd, rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16mf2x8_tumu(vm, vd, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_i16m1x8_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x8_t test_vlseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_i16m1x8_tumu(vm, vd, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, vl); 
+vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_u16mf4x8_tumu(vm, vd, rs1, vl);
 }
-vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_u16mf2x8_tumu(vm, vd, rs1, vl);
 }
-vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_u16m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_u16m1x8_tumu(vm, vd, rs1, vl);
 }
-vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_f16mf4x8_mu(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_f16mf4x8_mu(vm, vd, rs1, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_f16mf2x8_mu(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_f16mf2x8_mu(vm, vd, rs1, vl);
 }
-vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_f16m1x8_mu(mask, maskedoff_tuple, base, vl);
+vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_f16m1x8_mu(vm, vd, rs1, vl);
 }
-vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_i16mf4x8_mu(mask, maskedoff_tuple, base, vl);
+vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_i16mf4x8_mu(vm, vd, rs1, vl);
 }
-vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_i16mf2x8_mu(mask, maskedoff_tuple, base, vl);
+vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_i16mf2x8_mu(vm, vd, rs1, vl);
 }
-vint16m1x8_t test_vlseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_i16m1x8_mu(mask, maskedoff_tuple, base, vl);
+vint16m1x8_t test_vlseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_i16m1x8_mu(vm, vd, rs1, vl);
 }
-vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
- return __riscv_vlseg8e16_v_u16mf4x8_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) {
+ return __riscv_vlseg8e16_v_u16mf4x8_mu(vm, vd, rs1, vl);
 }
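/* [Editorial note; an annotation, not part of the auto-generated patch.]
 * The change in this hunk is purely mechanical: every policy test renames
 * its arguments mask -> vm, maskedoff_tuple -> vd, and base -> rs1, so the
 * C call reads like the operands of the vlseg8e16.v instruction it lowers
 * to. A minimal usage sketch, using an intrinsic and signature taken
 * verbatim from the _tum tests above (the wrapper name load_rows_tum is
 * invented for illustration):
 *
 *   vint16m1x8_t load_rows_tum(vbool16_t vm, vint16m1x8_t vd,
 *                              const int16_t *rs1, size_t vl) {
 *     // vm selects the active elements; under the tail-undisturbed
 *     // (_tum) policy, the remaining destination elements of the
 *     // 8-field segment tuple are carried over from vd.
 *     return __riscv_vlseg8e16_v_i16m1x8_tum(vm, vd, rs1, vl);
 *   }
 */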
-vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16mf2x8_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16mf2x8_mu(vm, vd, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg8e16_v_u16m1x8_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg8e16_v_u16m1x8_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16ff.c index 7255d1e53..c119246f4 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16ff.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e16ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf4x8_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf4x8_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf2x8_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf2x8_tu(vd, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16m1x8_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16m1x8_tu(vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf4x8_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf4x8_tu(vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf2x8_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf2x8_tu(vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16m1x8_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + 
return __riscv_vlseg8e16ff_v_i16m1x8_tu(vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf4x8_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf4x8_tu(vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf2x8_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf2x8_tu(vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16m1x8_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16m1x8_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf4x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16m1x8_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf4x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - 
return __riscv_vlseg8e16ff_v_i16m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16m1x8_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf4x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16m1x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf2x8_tumu(mask, 
maskedoff_tuple, base, new_vl, vl); +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf4x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_f16m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_f16m1x8_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf4x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x8_t 
test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_i16m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_i16m1x8_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf4x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e16ff_v_u16m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_u16m1x8_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32.c index 5fd6dbaac..78292b326 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32mf2x8_tu(maskedoff_tuple, base, vl); +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32mf2x8_tu(vd, rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32m1x8_tu(maskedoff_tuple, base, vl); +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32m1x8_tu(vd, rs1, vl); } -vint32mf2x8_t 
test_vlseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32mf2x8_tu(maskedoff_tuple, base, vl); +vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32mf2x8_tu(vd, rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32m1x8_tu(maskedoff_tuple, base, vl); +vint32m1x8_t test_vlseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32m1x8_tu(vd, rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32mf2x8_tu(maskedoff_tuple, base, vl); +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32mf2x8_tu(vd, rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32m1x8_tu(maskedoff_tuple, base, vl); +vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32m1x8_tu(vd, rs1, vl); } -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32mf2x8_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32mf2x8_tum(vm, vd, rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_f32m1x8_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_f32m1x8_tum(vm, vd, rs1, vl); } -vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32mf2x8_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32mf2x8_tum(vm, vd, rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_i32m1x8_tum(mask, maskedoff_tuple, base, vl); +vint32m1x8_t test_vlseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_i32m1x8_tum(vm, vd, rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32mf2x8_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32mf2x8_tum(vm, vd, rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg8e32_v_u32m1x8_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg8e32_v_u32m1x8_tum(vm, vd, rs1, vl); 
 }
 
-vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_f32mf2x8_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_f32m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_f32m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, vl);
+vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_i32mf2x8_tumu(vm, vd, rs1, vl);
 }
 
-vint32m1x8_t test_vlseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_i32m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vint32m1x8_t test_vlseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_i32m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_u32mf2x8_tumu(vm, vd, rs1, vl);
 }
 
-vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_u32m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_u32m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_f32mf2x8_mu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_f32mf2x8_mu(vm, vd, rs1, vl);
 }
 
-vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_f32m1x8_mu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_f32m1x8_mu(vm, vd, rs1, vl);
 }
 
-vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_i32mf2x8_mu(mask, maskedoff_tuple, base, vl);
+vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_i32mf2x8_mu(vm, vd, rs1, vl);
 }
 
-vint32m1x8_t test_vlseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_i32m1x8_mu(mask, maskedoff_tuple, base, vl);
+vint32m1x8_t test_vlseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_i32m1x8_mu(vm, vd, rs1, vl);
 }
 
-vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_u32mf2x8_mu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_u32mf2x8_mu(vm, vd, rs1, vl);
 }
 
-vuint32m1x8_t test_vlseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_v_u32m1x8_mu(mask, maskedoff_tuple, base, vl);
+vuint32m1x8_t test_vlseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_v_u32m1x8_mu(vm, vd, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32ff.c
index ba0c902ef..ad70e9f87 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e32ff.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32mf2x8_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32mf2x8_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32mf2x8_tu(maskedoff_tuple, base, new_vl, vl);
+vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32mf2x8_tu(vd, rs1, new_vl, vl);
 }
 
-vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32mf2x8_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32mf2x8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32mf2x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32mf2x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32mf2x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32mf2x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32mf2x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32mf2x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32mf2x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_f32m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_f32m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32mf2x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_i32m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_i32m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32mf2x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_v_u32m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_v_u32m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32ff\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64.c
index 65a9c658b..d6a8569f4 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_f64m1x8_tu(maskedoff_tuple, base, vl);
+vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_f64m1x8_tu(vd, rs1, vl);
 }
 
-vint64m1x8_t test_vlseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_i64m1x8_tu(maskedoff_tuple, base, vl);
+vint64m1x8_t test_vlseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_i64m1x8_tu(vd, rs1, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_u64m1x8_tu(maskedoff_tuple, base, vl);
+vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_u64m1x8_tu(vd, rs1, vl);
 }
 
-vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_f64m1x8_tum(mask, maskedoff_tuple, base, vl);
+vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_f64m1x8_tum(vm, vd, rs1, vl);
 }
 
-vint64m1x8_t test_vlseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_i64m1x8_tum(mask, maskedoff_tuple, base, vl);
+vint64m1x8_t test_vlseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_i64m1x8_tum(vm, vd, rs1, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_u64m1x8_tum(mask, maskedoff_tuple, base, vl);
+vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_u64m1x8_tum(vm, vd, rs1, vl);
 }
 
-vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_f64m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_f64m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vint64m1x8_t test_vlseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_i64m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vint64m1x8_t test_vlseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_i64m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_u64m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_u64m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_f64m1x8_mu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_f64m1x8_mu(vm, vd, rs1, vl);
 }
 
-vint64m1x8_t test_vlseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_i64m1x8_mu(mask, maskedoff_tuple, base, vl);
+vint64m1x8_t test_vlseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_i64m1x8_mu(vm, vd, rs1, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg8e64_v_u64m1x8_mu(mask, maskedoff_tuple, base, vl);
+vuint64m1x8_t test_vlseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg8e64_v_u64m1x8_mu(vm, vd, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64ff.c
index 27d43ce9d..2da186017 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e64ff.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_f64m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_f64m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_i64m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_i64m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_u64m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_u64m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_f64m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_f64m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_i64m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_i64m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_u64m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_u64m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_f64m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_f64m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_i64m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_i64m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_u64m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_u64m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_f64m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_f64m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_i64m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_i64m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e64ff_v_u64m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e64ff_v_u64m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64ff\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8.c
index bd85af08d..d198e99e9 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8.c
@@ -6,132 +6,132 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf8x8_tu(maskedoff_tuple, base, vl);
+vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf8x8_tu(vd, rs1, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf4x8_tu(maskedoff_tuple, base, vl);
+vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf4x8_tu(vd, rs1, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf2x8_tu(maskedoff_tuple, base, vl);
+vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf2x8_tu(vd, rs1, vl);
 }
 
-vint8m1x8_t test_vlseg8e8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8m1x8_tu(maskedoff_tuple, base, vl);
+vint8m1x8_t test_vlseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8m1x8_tu(vd, rs1, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf8x8_tu(maskedoff_tuple, base, vl);
+vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf8x8_tu(vd, rs1, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf4x8_tu(maskedoff_tuple, base, vl);
+vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf4x8_tu(vd, rs1, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf2x8_tu(maskedoff_tuple, base, vl);
+vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf2x8_tu(vd, rs1, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8m1x8_tu(maskedoff_tuple, base, vl);
+vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8m1x8_tu(vd, rs1, vl);
 }
 
-vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf8x8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf8x8_tum(vm, vd, rs1, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf4x8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf4x8_tum(vm, vd, rs1, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf2x8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf2x8_tum(vm, vd, rs1, vl);
 }
 
-vint8m1x8_t test_vlseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8m1x8_tum(mask, maskedoff_tuple, base, vl);
+vint8m1x8_t test_vlseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8m1x8_tum(vm, vd, rs1, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf8x8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf8x8_tum(vm, vd, rs1, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf4x8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf4x8_tum(vm, vd, rs1, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf2x8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf2x8_tum(vm, vd, rs1, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8m1x8_tum(mask, maskedoff_tuple, base, vl);
+vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8m1x8_tum(vm, vd, rs1, vl);
 }
 
-vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf8x8_tumu(vm, vd, rs1, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf4x8_tumu(vm, vd, rs1, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf2x8_tumu(vm, vd, rs1, vl);
 }
 
-vint8m1x8_t test_vlseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vint8m1x8_t test_vlseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf8x8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf4x8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf2x8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8m1x8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8m1x8_tumu(vm, vd, rs1, vl);
 }
 
-vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf8x8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf8x8_mu(vm, vd, rs1, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf4x8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf4x8_mu(vm, vd, rs1, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8mf2x8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8mf2x8_mu(vm, vd, rs1, vl);
 }
 
-vint8m1x8_t test_vlseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_i8m1x8_mu(mask, maskedoff_tuple, base, vl);
+vint8m1x8_t test_vlseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_i8m1x8_mu(vm, vd, rs1, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf8x8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf8x8_mu(vm, vd, rs1, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf4x8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf4x8_mu(vm, vd, rs1, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8mf2x8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8mf2x8_mu(vm, vd, rs1, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg8e8_v_u8m1x8_mu(mask, maskedoff_tuple, base, vl);
+vuint8m1x8_t test_vlseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg8e8_v_u8m1x8_mu(vm, vd, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8ff.c b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8ff.c
index 454165fab..125ffb1ac 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8ff.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlseg8e8ff.c
@@ -6,132 +6,132 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf8x8_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf8x8_tu(vd, rs1, new_vl, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf4x8_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf4x8_tu(vd, rs1, new_vl, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf2x8_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf2x8_tu(vd, rs1, new_vl, vl);
 }
 
-vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf8x8_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf8x8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf4x8_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf4x8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf2x8_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf2x8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8m1x8_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8m1x8_tu(vd, rs1, new_vl, vl);
 }
 
-vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf8x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf8x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf4x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf4x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf2x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf8x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf8x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf4x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf4x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf2x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf2x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8m1x8_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8m1x8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf8x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf4x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf2x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf8x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf4x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf2x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8m1x8_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8m1x8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf8x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf8x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf4x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf4x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8mf2x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_i8m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_i8m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf8x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf8x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf4x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf4x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8mf2x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8mf2x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e8ff_v_u8m1x8_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e8ff_v_u8m1x8_mu(vm, vd, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8ff\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e16.c
index 6651c92a8..5470c0d00 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e16.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4x2_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf2x2_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m1x2_tu(maskedoff_tuple, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m2x2_tu(maskedoff_tuple, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m4x2_tu(maskedoff_tuple, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf4x2_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf4x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf2x2_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m1x2_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m2x2_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m4x2_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_f16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_f16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_i16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_i16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf4x2_mu(mask, maskedoff_tuple,
base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_v_u16m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_u16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e16\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e32.c
index 2b7f70e58..faad1a5e4 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e32.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_f32mf2x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_v_f32mf2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_f32m1x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_v_f32m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_f32m2x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return
__riscv_vlsseg2e32_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4x2_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2x2_tu(maskedoff_tuple, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1x2_tu(maskedoff_tuple, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2x2_tu(maskedoff_tuple, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4x2_tu(maskedoff_tuple, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2x2_tu(maskedoff_tuple, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1x2_tu(maskedoff_tuple, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2x2_tu(maskedoff_tuple, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4x2_tu(maskedoff_tuple, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t 
maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bstride, 
vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t 
test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_f32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_i32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_v_u32m1x2_mu(mask, 
maskedoff_tuple, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_v_u32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_u32m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_v_u32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_v_u32m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_v_u32m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e32\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e64.c
index 90c4e1418..ba35f7c90 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e64.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m1x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m2x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_f64m4x2_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_f64m4x2_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m1x2_tu(maskedoff_tuple, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m1x2_tu(vd, rs1, rs2, vl);
 }
 
-vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m2x2_tu(maskedoff_tuple, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m2x2_tu(vd, rs1, rs2, vl);
 }
 
-vint64m4x2_t
test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4x2_tu(maskedoff_tuple, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1x2_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2x2_tu(maskedoff_tuple, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4x2_tu(maskedoff_tuple, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t 
vl) { + return __riscv_vlsseg2e64_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t mask, 
vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_f64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_v_i64m1x2_mu(mask, maskedoff_tuple, 
base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_i64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_i64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_v_u64m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_v_u64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e64\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e8.c
index 719f2caa2..ae8051355 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg2e8.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf8x2_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf8x2_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf4x2_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf4x2_tu(vd, rs1,
rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2x2_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1x2_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2x2_tu(maskedoff_tuple, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4x2_tu(maskedoff_tuple, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8x2_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4x2_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf2x2_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m1x2_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m2x2_tu(maskedoff_tuple, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8m4x2_tu(maskedoff_tuple, base, bstride, vl); +vuint8m4x2_t 
test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m1x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m2x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_i8m4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_i8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t 
bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m1x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m2x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m4x2_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m4x2_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_i8m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_i8m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m1x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m2x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_v_u8m4x2_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_v_u8m4x2_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e8\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e16.c
index efa6af9b1..cf06e840f 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e16.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m2x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf4x3_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m1x3_tu(vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf4x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_f16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_i16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_v_u16m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e16\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e32.c
index c18d77314..a56e28fca 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e32.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m2x3_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_tu(vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_f32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_i32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e32_v_u32m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e32_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e32\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e64.c
index cb78e7c9a..28080b3d8 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e64.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m2x3_tu(vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m1x3_tu(vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_f64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_i64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e64_v_u64m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e64_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e64\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e8.c
index 2a6b13575..1f0de7077 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg3e8.c
@@ -6,164 +6,164 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8x3_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4x3_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m1x3_tu(vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf8x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf4x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf2x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m1x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m2x3_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m2x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m1x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m2x3_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_i8m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m1x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e8_v_u8m2x3_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e8_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final {
scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e8\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e16.c index f44378c93..700d2fe69 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e16.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf4x4_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m1x4_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m2x4_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16mf4x4_tu(maskedoff_tuple, base, bstride, vl); +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16m1x4_tu(maskedoff_tuple, base, bstride, vl); +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16m2x4_tu(maskedoff_tuple, base, bstride, vl); +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg4e16_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16mf4x4_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16mf2x4_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16m1x4_tu(maskedoff_tuple, base, bstride, vl); +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_u16m2x4_tu(maskedoff_tuple, base, bstride, vl); +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_f16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); 
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_f16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_f16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_i16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_i16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_v_u16m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_v_u16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e16\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e32.c
index 25398ed81..2d5feb5b0 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e32.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_f32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_f32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_i32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_i32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_v_u32m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e32\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e64.c
index d0fa39680..c4e56ad13 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e64.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_f64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_f64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_i64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_i64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m1x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_v_u64m2x4_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e64\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e8.c
index 9aaf977fe..3a5b04690 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg4e8.c
@@ -6,164 +6,164 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8mf8x4_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf8x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8mf4x4_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8mf8x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf8x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8mf4x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8mf2x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8m1x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8m2x4_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8m1x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m1x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_i8m2x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_i8m2x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2x4_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x4_t 
test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t 
maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_i8m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m1x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e8_v_u8m2x4_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e8_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e8\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e16.c index 6fdfd7d4e..6bea7f0f5 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4x5_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf4x5_t 
test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1x5_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4x5_tu(maskedoff_tuple, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1x5_tu(maskedoff_tuple, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4x5_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1x5_tu(maskedoff_tuple, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t 
vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } 
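[A quick gloss on the policy suffixes these tests cycle through, with a hedged sketch: _tu keeps tail elements (past vl) from vd; _tum masks the operation and keeps the tail from vd, masked-off lanes agnostic; _tumu keeps both tail and masked-off lanes from vd; _mu keeps only masked-off lanes, tail agnostic. Only the intrinsic call below is taken from the tests; the wrapper name is illustrative:

#include <riscv_vector.h>
#include <stddef.h>

typedef _Float16 float16_t; /* same convenience typedef as the tests */

/* Strided five-field segment load that updates only the lanes selected
 * by vm; with _tumu, masked-off and tail lanes keep the contents of vd.
 * Assumes a toolchain with RVV and half-precision float support. */
vfloat16m1x5_t update_active_lanes(vbool16_t vm, vfloat16m1x5_t vd,
                                   const float16_t *rs1, ptrdiff_t rs2,
                                   size_t vl) {
  return __riscv_vlsseg5e16_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl);
}
]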
-vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t 
maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_f16m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_i16m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16mf2x5_mu(mask, maskedoff_tuple, base, 
bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_v_u16m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e32.c index e35e19052..f05cd99d6 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1x5_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1x5_tu(maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1x5_tu(maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t 
maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg5e32_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_f32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_i32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t vm, 
vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_v_u32m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e64.c index 4ea99f3ad..fe1898ea9 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1x5_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1x5_tu(maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1x5_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t 
test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_f64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_i64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_v_u64m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e8.c index 0e214de13..c2e8a403d 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg5e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8x5_tu(maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg5e8_v_i8mf4x5_tu(maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1x5_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8x5_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4x5_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2x5_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1x5_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t 
test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1x5_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t 
maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_v_i8m1x5_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t 
rs2, size_t vl) {
+  return __riscv_vlsseg5e8_v_i8m1x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e8_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e8_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e8_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e8_v_u8m1x5_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e8_v_u8m1x5_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e8\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e16.c
index 418b45918..24ddf8011 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e16.c
@@ -6,148 +6,148 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf4x6_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf4x6_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf2x6_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16m1x6_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf4x6_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf4x6_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf2x6_tu(vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16m1x6_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf4x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf4x6_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf2x6_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16m1x6_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_f16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_f16m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_i16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_i16m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e16_v_u16m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e16_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e16\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e32.c
index 3e2e73036..131d77d02 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e32.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32mf2x6_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32m1x6_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32mf2x6_tu(vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32m1x6_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32mf2x6_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32m1x6_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_f32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_f32m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_i32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_i32m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_v_u32m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e64.c
index b9e97a20a..1c67cbe92 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e64.c
@@ -6,52 +6,52 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_f64m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_f64m1x6_tu(vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_i64m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_i64m1x6_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_u64m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_u64m1x6_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_f64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_f64m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_i64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_i64m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_u64m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_u64m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_f64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_f64m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_i64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_i64m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_v_u64m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e8.c
index 9eebba34d..6a23e01e8 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg6e8.c
@@ -6,132 +6,132 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf8x6_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf4x6_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_tu(vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf8x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf4x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf2x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8m1x6_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8m1x6_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8m1x6_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8m1x6_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_i8m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_i8m1x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_v_u8m1x6_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_v_u8m1x6_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e8\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e16.c
index 4c05d043f..856b25b72 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e16.c
@@ -6,148 +6,148 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf4x7_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16m1x7_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf4x7_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16m1x7_tu(maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16m1x7_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf4x7_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1x7_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_f16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_i16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_v_u16m1x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_v_u16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e16\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e32.c
index 1c9f6f952..182f4d019 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e32.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32m1x7_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32m1x7_tu(maskedoff_tuple, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32m1x7_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32mf2x7_tu(maskedoff_tuple, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32m1x7_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32m1x7_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_v_f32m1x7_mu(mask, maskedoff_tuple,
base, bstride, vl); +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_i32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e32_v_u32m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e32_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e64.c index 21bda0c2a..2156298d1 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1x7_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1x7_tu(maskedoff_tuple, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1x7_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tu(vd, 
rs1, rs2, vl); } -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_f64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e64_v_i64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t 
vl) { - return __riscv_vlsseg7e64_v_u64m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e64_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e8.c index 27e4cec8b..10151c0bb 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg7e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8x7_tu(maskedoff_tuple, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4x7_tu(maskedoff_tuple, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2x7_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1x7_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8x7_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4x7_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2x7_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg7e8_v_u8m1x7_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1x7_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t 
test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x7_t 
test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_i8m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_v_u8m1x7_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e16.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e16.c index 6e2def40a..278b2aecf 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double 
float64_t; -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4x8_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1x8_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4x8_tu(maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1x8_tu(maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4x8_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1x8_tu(maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t mask, 
vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg8e16_v_u16m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); 
+vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_f16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_i16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg8e16_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_v_u16m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e32.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e32.c index 348958e11..45104821f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1x8_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1x8_tu(maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1x8_tu(maskedoff_tuple, 
base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t 
rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_f32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_i32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t 
mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_v_u32m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e64.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e64.c index 9772c75a8..803af0a89 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1x8_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1x8_tu(maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1x8_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1x8_tum(mask, maskedoff_tuple, base, bstride, vl); 
+vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_f64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_i64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_v_u64m1x8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e8.c b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e8.c index ba214dce3..de52b096a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vlsseg8e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8x8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4x8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8m1x8_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf8x8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf4x8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8mf2x8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_u8m1x8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t 
-  return __riscv_vlsseg8e8_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8m1x8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_i8m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_i8m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg8e8_v_u8m1x8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg8e8_v_u8m1x8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e8\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxei16.c
index 588ed24f8..d8e6d12cd 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxei16.c
@@ -6,916 +6,916 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16mf4_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16mf4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16mf2_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16mf2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m1_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m1_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m2_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m4_tu(maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m8_tu(maskedoff, base, bindex, vl);
+vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m8_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32mf2_tu(maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32mf2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m1_tu(maskedoff, base, bindex, vl);
+vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m1_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m2_tu(maskedoff, base, bindex, vl);
+vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m4_tu(maskedoff, base, bindex, vl);
+vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m8_tu(maskedoff, base, bindex, vl);
+vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m1_tu(maskedoff, base, bindex, vl);
+vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m1_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m2_tu(maskedoff, base, bindex, vl);
+vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m4_tu(maskedoff, base, bindex, vl);
+vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m8_tu(maskedoff, base, bindex, vl);
+vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m8_tu(vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf8_tu(maskedoff, base, bindex, vl);
+vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf8_tu(vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf4_tu(maskedoff, base, bindex, vl);
+vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf4_tu(vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf2_tu(maskedoff, base, bindex, vl);
+vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf2_tu(vd, rs1, rs2, vl);
 }
-vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m1_tu(maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m1_tu(vd, rs1, rs2, vl);
 }
-vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m2_tu(maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m2_tu(vd, rs1, rs2, vl);
 }
-vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m4_tu(maskedoff, base, bindex, vl);
+vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m4_tu(vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16mf4_tu(maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16mf4_tu(vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16mf2_tu(maskedoff, base, bindex, vl);
+vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16mf2_tu(vd, rs1, rs2, vl);
 }
-vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m1_tu(maskedoff, base, bindex, vl);
+vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m1_tu(vd, rs1, rs2, vl);
 }
-vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m2_tu(maskedoff, base, bindex, vl);
+vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m2_tu(vd, rs1, rs2, vl);
 }
-vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m4_tu(maskedoff, base, bindex, vl);
+vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m4_tu(vd, rs1, rs2, vl);
 }
-vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m8_tu(maskedoff, base, bindex, vl);
+vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m8_tu(vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32mf2_tu(maskedoff, base, bindex, vl);
+vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32mf2_tu(vd, rs1, rs2, vl);
 }
-vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m1_tu(maskedoff, base, bindex, vl);
+vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m1_tu(vd, rs1, rs2, vl);
 }
-vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m2_tu(maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m2_tu(vd, rs1, rs2, vl);
 }
-vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m4_tu(maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m4_tu(vd, rs1, rs2, vl);
 }
-vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m8_tu(maskedoff, base, bindex, vl);
+vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m8_tu(vd, rs1, rs2, vl);
 }
-vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m1_tu(maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m1_tu(vd, rs1, rs2, vl);
 }
-vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m2_tu(maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m2_tu(vd, rs1, rs2, vl);
 }
-vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m4_tu(maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m4_tu(vd, rs1, rs2, vl);
 }
-vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m8_tu(maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8mf8_tu(maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8mf8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8mf4_tu(maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8mf4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8mf2_tu(maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8mf2_tu(vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8m1_tu(maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8m1_tu(vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8m2_tu(maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8m2_tu(vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8m4_tu(maskedoff, base, bindex, vl);
+vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8m4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16mf4_tu(maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16mf4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16mf2_tu(maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16mf2_tu(vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m1_tu(maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m1_tu(vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m2_tu(maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m2_tu(vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m4_tu(maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m4_tu(vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m8_tu(maskedoff, base, bindex, vl);
+vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m8_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32mf2_tu(maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32mf2_tu(vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m1_tu(maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m1_tu(vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m2_tu(maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m2_tu(vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m4_tu(maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m4_tu(vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m8_tu(maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m8_tu(vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m1_tu(maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m1_tu(vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m2_tu(maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m2_tu(vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m4_tu(maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m4_tu(vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m8_tu(maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16mf4_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16mf2_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m1_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m1_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m2_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m4_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32mf2_tum(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m1_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m1_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m2_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m4_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m8_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m1_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m1_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m2_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m4_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m8_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf8_tum(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf4_tum(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf2_tum(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m1_tum(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m2_tum(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m4_tum(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16mf4_tum(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16mf2_tum(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m1_tum(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m2_tum(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m4_tum(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16m8_tum(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16m8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32mf2_tum(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m1_tum(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m2_tum(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m4_tum(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i32m8_tum(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i32m8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m1_tum(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m1_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m2_tum(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m4_tum(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m4_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i64m8_tum(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i64m8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8mf8_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8mf8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8mf4_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8mf2_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8m1_tum(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8m1_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8m2_tum(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8m2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u8m4_tum(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u8m4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16mf4_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16mf4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16mf2_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m1_tum(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m1_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m2_tum(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m4_tum(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u16m8_tum(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u16m8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32mf2_tum(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32mf2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m1_tum(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m1_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m2_tum(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m4_tum(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u32m8_tum(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u32m8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m1_tum(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m1_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m2_tum(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m4_tum(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m4_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_u64m8_tum(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_u64m8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m1_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m4_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f16m8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f16m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m1_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m4_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f32m8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f32m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m1_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m2_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m4_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_f64m8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_f64m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m1_tumu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m2_tumu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i8m4_tumu(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i8m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_v_i16mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) 
{ - return __riscv_vluxei16_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m2_tumu(mask, 
maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m4_tumu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m1_tumu(mask, 
maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxei16_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m1_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m2_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m4_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_f16m8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m1_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m2_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f32m4_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return 
__riscv_vluxei16_v_f32m8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m1_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m2_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m4_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_f64m8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m1_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m2_mu(mask, maskedoff, base, bindex, vl); +vint8m2_t 
test_vluxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_i8m4_mu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m1_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m2_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m4_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_i16m8_mu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m1_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vluxei16_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m2_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m4_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i32m8_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m1_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m2_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m4_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_i64m8_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t 
maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m1_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m2_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_v_u8m4_mu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m1_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m2_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u16m4_mu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return 
__riscv_vluxei16_v_u16m8_mu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_u16m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m1_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m2_mu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m4_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u32m8_mu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m1_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m2_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m4_mu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_v_u64m8_mu(mask, maskedoff, base, bindex, vl); 
+vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_u64m8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei16\.[ivxfswum.]+\s+} 228 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxei32.c index a5ea38c4d..917277a69 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxei32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxei32.c @@ -6,836 +6,836 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf4_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf2_tu(maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m1_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m2_tu(maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m4_tu(maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32mf2_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m1_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m2_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m2_tu(vd, rs1, rs2, vl); } 
-vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m4_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m8_tu(maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m1_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m2_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m4_tu(maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m8_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf8_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf4_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf2_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m1_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t 
bindex, size_t vl) { - return __riscv_vluxei32_v_i8m2_tu(maskedoff, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m2_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf4_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf2_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m1_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m2_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m4_tu(maskedoff, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m4_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32mf2_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m1_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m2_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m4_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m8_tu(maskedoff, base, bindex, vl); +vint32m8_t 
test_vluxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m1_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m2_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m4_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m8_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf8_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf4_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf2_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m1_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m2_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf4_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf4_tu(vd, 
rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf2_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m1_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m2_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m4_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32mf2_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m1_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m2_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m4_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m8_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m1_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t 
maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m2_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m4_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m8_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m1_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m2_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m4_tum(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vluxei32_v_f32m1_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m2_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m4_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m1_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m2_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m4_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vluxei32_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m1_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m2_tum(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m1_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m2_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m4_tum(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t 
test_vluxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m1_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m2_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m4_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m8_tum(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m1_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m2_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m4_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m8_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, 
size_t vl) { + return __riscv_vluxei32_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m1_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m2_tum(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m1_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m2_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m4_tum(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m4_tum(vm, vd, 
rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m1_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m2_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m4_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m8_tum(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m1_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m2_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m4_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t 
test_vluxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return 
__riscv_vluxei32_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return 
__riscv_vluxei32_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m2_tumu(vm, vd, rs1, rs2, 
vl); } -vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t 
test_vluxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m2_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m4_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t 
test_vluxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } 
-vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m1_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m2_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f16m4_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m1_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m2_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m4_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_f32m8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m1_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t 
test_vluxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m2_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m4_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_f64m8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m1_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i8m2_mu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { 
- return __riscv_vluxei32_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m1_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m2_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i16m4_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m1_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m2_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m4_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_i32m8_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m1_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t vm, 
vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m2_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m4_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_i64m8_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf4_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8mf2_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m1_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u8m2_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf4_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf4_mu(vm, vd, rs1, 
rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16mf2_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m1_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m2_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u16m4_mu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32mf2_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m1_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m2_mu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m4_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_v_u32m8_mu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t mask, 
vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m1_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m2_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m4_mu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_v_u64m8_mu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_v_u64m8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei32\.[ivxfswum.]+\s+} 208 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxei64.c index 062116890..1b7bd0e05 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxei64.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf4_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf2_tu(maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m1_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m2_tu(maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, 
size_t vl) { - return __riscv_vluxei64_v_f32mf2_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m1_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m2_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m4_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m1_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m2_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m4_tu(maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m8_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf8_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf4_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf2_tu(maskedoff, 
base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8m1_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8m1_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf4_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf2_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m1_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m2_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m2_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32mf2_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m1_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m2_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m4_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m4_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m1_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxei64_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m2_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m4_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m8_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf8_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf4_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf2_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8m1_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf4_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf2_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m1_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t maskedoff, 
const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m2_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32mf2_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m1_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m2_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m4_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m1_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m2_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m4_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m8_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const 
float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m1_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m2_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m1_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m2_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m4_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m1_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m2_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t mask, 
vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m4_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8m1_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m1_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - 
return __riscv_vluxei64_v_i16m2_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m1_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m2_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m4_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m1_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m2_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m4_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m8_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t 
test_vluxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8m1_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m1_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m2_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m1_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t vm, 
vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m2_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m4_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m1_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m2_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m4_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t vm, 
vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); 
+vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t vm, 
vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, 
size_t vl) { + return __riscv_vluxei64_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m1_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u16m2_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m1_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m2_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, 
size_t vl) { + return __riscv_vluxei64_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u32m4_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m1_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m2_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m4_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_u64m8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf4_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m1_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f16m2_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, 
size_t vl) { + return __riscv_vluxei64_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32mf2_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m1_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m2_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f32m4_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m1_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m2_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m4_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_f64m8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf8_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxei64_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf4_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8mf2_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i8m1_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf4_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16mf2_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m1_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_v_i16m2_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32mf2_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_v_i32m1_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, 
const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i32m2_mu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i32m4_mu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m1_mu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m2_mu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m4_mu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_i64m8_mu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_i64m8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8mf8_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8mf8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8mf4_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u8m1_mu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u8m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16mf4_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16m1_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u16m2_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32m1_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32m2_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u32m4_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m1_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m2_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m4_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxei64_v_u64m8_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxei64_v_u64m8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei64\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxei8.c
index 9878bbe98..677d0a469 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxei8.c
@@ -6,948 +6,948 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf4_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf2_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m1_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m1_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m2_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m4_tu(maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m8_tu(maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32mf2_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m1_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m2_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m4_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m8_tu(maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m1_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m2_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m4_tu(maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m8_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t 
*base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf8_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf4_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf2_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m1_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m2_tu(maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m2_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m4_tu(maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m4_tu(vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m8_tu(maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m8_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16mf4_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16mf2_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m1_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m2_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return 
__riscv_vluxei8_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m4_tu(maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m8_tu(maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32mf2_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m1_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m2_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m4_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m8_tu(maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m1_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m2_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m4_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return 
__riscv_vluxei8_v_i64m8_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf8_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf4_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf2_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m1_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m2_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m4_tu(maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m8_tu(maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m8_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16mf4_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16mf2_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m1_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxei8_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m2_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m4_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m8_tu(maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16m8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32mf2_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m1_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m2_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m4_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m8_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m1_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m2_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, 
vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m4_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m8_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16mf4_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m1_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m2_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m4_tum(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m8_tum(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32mf2_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m1_tum(mask, maskedoff, base, 
bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m2_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m4_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m1_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m2_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m4_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf8_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf4_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t 
vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf2_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m1_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m2_tum(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m4_tum(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m8_tum(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16mf4_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16mf2_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m1_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m2_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t 
mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m4_tum(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m8_tum(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32mf2_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m1_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m2_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m4_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m8_tum(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m1_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m2_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m4_tum(mask, maskedoff, base, 
bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m8_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf4_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf2_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m1_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m2_tum(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m4_tum(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m8_tum(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8m8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16mf4_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxei8_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16mf2_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m1_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m2_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m4_tum(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u16m8_tum(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u16m8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32mf2_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m1_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m2_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m4_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t 
test_vluxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u32m8_tum(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m1_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m2_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m4_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u64m8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16mf4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t 
mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f16m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32mf2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f32m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m1_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m2_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t 
test_vluxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m4_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_f64m8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf4_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8mf2_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m1_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m2_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m4_tumu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i8m8_tumu(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i8m8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vluxei8_v_i16mf4_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16mf2_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m1_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m2_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m4_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i16m8_tumu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32mf2_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m1_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m2_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m4_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t 
test_vluxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i32m8_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m1_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m2_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m4_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_i64m8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf4_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8mf2_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_v_u8m1_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) 
+  return __riscv_vluxei8_v_u8m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m2_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m4_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m8_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf4_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf2_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m1_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m2_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m4_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m8_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32mf2_tumu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32mf2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m1_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m2_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m4_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m8_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m1_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m1_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m2_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m4_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m4_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m8_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf4_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16mf2_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m1_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m2_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m4_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f16m8_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f16m8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32mf2_mu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m1_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m2_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m4_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f32m8_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f32m8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m1_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m2_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m4_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_f64m8_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_f64m8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf8_mu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf4_mu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8mf2_mu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m1_mu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m2_mu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m4_mu(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i8m8_mu(mask, maskedoff, base, bindex, vl);
+vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i8m8_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16mf4_mu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16mf2_mu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m1_mu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m2_mu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m4_mu(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i16m8_mu(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i16m8_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32mf2_mu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m1_mu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m2_mu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m4_mu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i32m8_mu(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i32m8_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m1_mu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m2_mu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m4_mu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_i64m8_mu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_i64m8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf8_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf4_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m1_mu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m2_mu(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m4_mu(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u8m8_mu(mask, maskedoff, base, bindex, vl);
+vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u8m8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf4_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m1_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m2_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m4_mu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u16m8_mu(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u16m8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32mf2_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32mf2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m1_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m2_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m4_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u32m8_mu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u32m8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m1_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m1_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m2_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m4_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m4_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxei8_v_u64m8_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxei8_v_u64m8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei8\.[ivxfswum.]+\s+} 236 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei16.c
index 0530c44e6..a4cbb02af 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei16.c
@@ -6,772 +6,772 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m4x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf8x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf4x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m1x2_tu(vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m2x2_tu(vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m4x2_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m1x2_tu(vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m2x2_tu(vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m4x2_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m1x2_tu(vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m2x2_tu(vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m4x2_tu(vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i64m1x2_tu(vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i64m2x2_tu(vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i64m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8mf8x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8mf4x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u8m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u8m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u16m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u32m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u64m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u64m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_u64m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_f64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i8m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_v_i32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
{ + return __riscv_vluxseg2ei16_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t 
test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t 
maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const 
float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t 
maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei16_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); 
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t 
*rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, 
size_t vl) { + return __riscv_vluxseg2ei16_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei16_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_f64m4x2_mu(vm, vd, rs1, rs2, 
vl); } -vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei16_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, 
const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_i64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t 
test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, 
vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei16\.[ivxfswum.]+\s+} 192 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei32.c index a885a59b8..d6bb2c640 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei32.c @@ -6,740 +6,740 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_f16m4x2_tu(maskedoff_tuple, base, bindex, vl); 
+vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m1x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m2x2_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m4x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf8x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf4x2_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m1x2_tu(vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m2x2_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m1x2_tu(vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m2x2_tu(vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m4x2_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m1x2_tu(vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m2x2_tu(vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m4x2_tu(vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m1x2_tu(vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m2x2_tu(vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf8x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf4x2_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf4x2_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32mf2x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m4x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m1x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m2x2_tu(vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m4x2_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m1x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m2x2_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m4x2_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f32m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_f64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i8m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i32m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_i64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_i64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u8m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u16m4x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m1x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei32_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei32_v_u32m2x2_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return
__riscv_vluxseg2ei32_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei32\.[ivxfswum.]+\s+} 184 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei64.c index c9f58b6eb..bf7e2e96a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei64.c @@ -6,660 +6,660 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t 
test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t 
test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, 
size_t vl) { + return __riscv_vluxseg2ei64_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16mf4x2_tu(vd, rs1, rs2, vl); } 
-vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t 
test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t 
maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei64_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t 
test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t 
rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u64m2x2_tum(vm, vd, rs1, 
rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m2x2_tumu(vm, 
vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t 
test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, 
const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei64_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f16m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f16m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f32m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f32m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f32m4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f64m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f64m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_f64m4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i8m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i16m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i16m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i32m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i32m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i32m4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i64m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i64m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_i64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_i64m4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u8m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u16m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u16m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u32m4x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u64m1x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u64m2x2_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei64_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_u64m4x2_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei64\.[ivxfswum.]+\s+} 164 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei8.c
index f4f97b029..8cfafe5ef 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg2ei8.c
@@ -6,772 +6,772 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16mf4x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16mf2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m1x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m4x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32mf2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m1x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m4x2_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m1x2_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m4x2_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf8x2_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf4x2_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf2x2_tu(vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m1x2_tu(vd, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m2x2_tu(vd, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m4x2_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16mf4x2_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16mf2x2_tu(vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m1x2_tu(vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m2x2_tu(vd, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m4x2_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32mf2x2_tu(vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32m1x2_tu(vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32m2x2_tu(vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32m4x2_tu(vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i64m1x2_tu(vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i64m2x2_tu(vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i64m4x2_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8mf8x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8mf8x2_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8mf4x2_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8mf2x2_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8m1x2_tu(vd, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8m2x2_tu(vd, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8m4x2_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16mf4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16mf4x2_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16mf2x2_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16m1x2_tu(vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16m2x2_tu(vd, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16m4x2_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32mf2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32mf2x2_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32m1x2_tu(vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32m2x2_tu(vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32m4x2_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u64m1x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u64m1x2_tu(vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u64m2x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u64m2x2_tu(vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u64m4x2_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u64m4x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i32m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i64m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i64m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i64m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8mf8x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u8m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u8m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16mf4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u16m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u16m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32mf2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u32m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u32m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u64m1x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u64m1x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u64m2x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u64m2x2_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_u64m4x2_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_u64m4x2_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_f64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vluxseg2ei8_v_i16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_i16m2x2_tumu(vm,
vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t 
*base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8mf8x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16mf4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16m1x2_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32mf2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u64m1x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u64m2x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, 
vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u64m4x2_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei8_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_f64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, 
const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, 
const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_i64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8mf8x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, 
vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u8m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16mf4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u16m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32mf2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u32m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u64m1x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u64m2x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_v_u64m4x2_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei8\.[ivxfswum.]+\s+} 192 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei16.c index 0d37d6ea4..d744c7b40 100644 --- 
a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei16.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t 
vl) { - return __riscv_vluxseg3ei16_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i8m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl); 
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const 
uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u8m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, 
vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei16_v_f32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei16\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei32.c
index 2bc9865a8..2500b3175 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei32.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf4x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf8x3_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf4x3_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf4x3_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf8x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf4x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf4x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32mf2x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m1x3_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m2x3_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl)
{ + return __riscv_vluxseg3ei32_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } 
-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei32\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei64.c
index 96e98a23b..48c13d481 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei64.c
@@ -6,564 +6,564 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m1x3_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m2x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8m1x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m1x3_tu(vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m2x3_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32m1x3_tu(vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32m2x3_tu(vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i64m1x3_tu(vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i64m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u8m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u16m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u32m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u64m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_u64m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_v_i32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const
int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16mf2x3_tum(vm, vd, 
rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t 
test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } 
-vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const 
int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei64_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei64_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f32m2x3_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, 
vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t 
test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, 
vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei64\.[ivxfswum.]+\s+} 140 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei8.c index e698287df..4f25984d2 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg3ei8.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f16m2x3_tu(maskedoff_tuple, base, bindex, vl); 
+vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_f64m2x3_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8mf8x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t 
*rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i8m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16mf4x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i16m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32mf2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i32m2x3_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_v_i64m1x3_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t 
test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i64m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf8x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf8x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf4x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf4x3_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32mf2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32mf2x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m2x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m1x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m1x3_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m2x3_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m2x3_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf8x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf4x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32mf2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m1x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m1x3_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m2x3_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m2x3_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf8x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf4x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32mf2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m1x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m2x3_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_f64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_f64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_i64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf8x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u8m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf4x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u16m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32mf2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u32m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m1x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_v_u64m2x3_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei8\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei16.c
index 0097df115..57ff257f1 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei16.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f32m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f32m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f64m1x4_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8mf8x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8mf4x4_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8m1x4_tu(vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8m2x4_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i16m1x4_tu(vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i16m2x4_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i32m1x4_tu(vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i32m2x4_tu(vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i64m1x4_tu(vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i64m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u8mf8x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u8mf4x4_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u8mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u8m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u8m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u16mf4x4_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u16mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u16m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u16m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u32mf2x4_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u32m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u32m2x4_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u64m1x4_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_u64m2x4_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f16m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f32m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f32m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f64m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_f64m2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_v_i8m1x4_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_v_i8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl)
{ + return __riscv_vluxseg4ei16_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t 
test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const 
uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, 
size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, 
vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m2x4_tumu(mask, maskedoff_tuple, 
base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t 
test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, 
vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const 
float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } 
-vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) 
{ - return __riscv_vluxseg4ei16_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t 
test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei16\.[ivxfswum.]+\s+} 148 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei32.c index b3fb8aa93..4f095449d 100644 --- 
a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei32.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { 
-  return __riscv_vluxseg4ei32_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf8x4_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf4x4_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf2x4_tu(vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m1x4_tu(vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m2x4_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf4x4_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf2x4_tu(vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m1x4_tu(vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m2x4_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32mf2x4_tu(vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m1x4_tu(vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m2x4_tu(vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m1x4_tu(vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m2x4_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf8x4_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf4x4_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf2x4_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m1x4_tu(vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m2x4_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf4x4_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf2x4_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m1x4_tu(vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m2x4_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32mf2x4_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m1x4_tu(vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m2x4_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m1x4_tu(vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m2x4_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m1x4_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m2x4_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f16m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f32m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_f64m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i8m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i16m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i32m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_i64m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u8m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u16m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei32\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei64.c
index 2bfc5a155..a7ef76217 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei64.c
@@ -6,564 +6,564 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16mf4x4_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16mf2x4_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16m1x4_tu(vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f16m2x4_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32mf2x4_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32m1x4_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f32m2x4_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f64m1x4_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf8x4_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf4x4_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8mf2x4_tu(vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i8m1x4_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16mf4x4_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16mf2x4_tu(vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16m1x4_tu(vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i16m2x4_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32mf2x4_tu(vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_v_i32m1x4_tu(vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t 
vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vluxseg4ei64_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf4x4_tum(mask, 
maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const 
int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); 
} -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, 
vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t 
test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const 
int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vluxseg4ei64_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m2x4_tumu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + 
return __riscv_vluxseg4ei64_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t 
maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg4ei64_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei64\.[ivxfswum.]+\s+} 140 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei8.c index 4be2a812b..5ada3a600 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg4ei8.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16m1x4_tu(maskedoff_tuple, base, bindex, vl); 
+vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8mf8x4_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t 
test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei8_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8mf8x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16mf4x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t 
test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u32mf2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u32m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u32m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u64m1x4_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u64m2x4_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei8_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t 
test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vluxseg4ei8_v_i32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_i64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8mf8x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, 
const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u8m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16mf4x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u16m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u32mf2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u32m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u32m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u32m2x4_tum(vm, vd, rs1, rs2, 
vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u64m1x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u64m2x4_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_f32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t 
-vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf8x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf4x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32mf2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u64m1x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u64m2x4_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f16m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f32m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_f64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_f64m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i8m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i16m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i32m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_i64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_i64m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf8x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u8m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u8m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf4x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u16m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u16m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32mf2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u32m2x4_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei8_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl);
__riscv_vluxseg4ei8_v_u64m1x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_v_u64m2x4_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei8\.[ivxfswum.]+\s+} 148 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei16.c index 3f917690c..cd7640f48 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t 
*rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
 }
 
-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i64m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf8x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf4x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf4x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u64m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f16m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_f64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i8m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i16m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_i64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_i64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u8m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u16m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_v_u64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei32.c
index f2b5f38f3..83873541c 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei32.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f16mf4x5_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f16mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f16m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f32mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f32m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f64m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8mf8x5_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8mf4x5_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i16mf4x5_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i16mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i16m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i32mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i32m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i64m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8mf8x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8mf4x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u16mf4x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u16mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u16m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u32mf2x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u32m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u64m1x5_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_f64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_i64m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u8m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u16m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u32m1x5_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl);
bindex, vl); +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf4x5_tumu(mask, maskedoff_tuple, 
base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, 
vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, 
vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t 
vl) { + return __riscv_vluxseg5ei32_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t 
maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_i64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei32_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei32_v_u16m1x5_mu(mask, 
maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u16m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u32m1x5_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_v_u64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei64.c
index b1624a730..e6c0e151e 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei64.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei64_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei64_v_f16mf4x5_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei64_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei64_v_f16mf2x5_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei64_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei64_v_f16m1x5_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei64_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t
test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, 
vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei64_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t 
vl) { - return __riscv_vluxseg5ei64_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t 
test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, 
size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + 
return __riscv_vluxseg5ei64_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf4x5_tumu(vm, vd, rs1, rs2, 
vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, 
vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t 
maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return 
__riscv_vluxseg5ei64_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_i64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, 
vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei64_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei64\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei8.c index dad693d86..34ea76358 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg5ei8.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf4x5_tu(maskedoff_tuple, base, 
bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf8x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) 
{ + return __riscv_vluxseg5ei8_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf4x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32mf2x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u64m1x5_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u64m1x5_tu(vd, rs1, rs2, vl); } 
-vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf8x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, 
vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf8x5_tum(mask, maskedoff_tuple, 
base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf4x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32mf2x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const 
uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u64m1x5_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, 
size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t 
test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf8x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf4x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const 
uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32mf2x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u64m1x5_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_f32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vluxseg5ei8_v_f64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t 
rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_i64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf8x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u8m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf4x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t 
maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u16m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32mf2x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u32m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_v_u64m1x5_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei16.c index 5a675733c..b7d06d653 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei16_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f64m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf8x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf8x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i64m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf8x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u64m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_f64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i8m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_i64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u8m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei16_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei32.c
index c45ac92c7..bab6c1d2d 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f32mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f32m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f64m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf8x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf8x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i64m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf8x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16mf4x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u32mf2x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u32m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u64m1x6_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei32_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei32_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2,
size_t vl) { + return __riscv_vluxseg6ei32_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + 
return __riscv_vluxseg6ei32_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, 
vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u16mf4x6_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei64.c index 7827ff2c8..61f38dc69 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei64.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei64_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei64_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8m1x6_tu(maskedoff_tuple, base, bindex, 
vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t 
*base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf4x6_tum(mask, 
maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, 
vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, 
size_t vl) { + return __riscv_vluxseg6ei64_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei64_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t 
test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t 
mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_f64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i8m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_i64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u8m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei8.c
index 87eb63ba9..3222ed656 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg6ei8.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf4x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf2x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16m1x6_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32mf2x6_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32m1x6_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f64m1x6_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf8x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf8x6_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf4x6_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf2x6_tu(vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8m1x6_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf4x6_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf2x6_tu(vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16m1x6_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32mf2x6_tu(vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32m1x6_tu(vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i64m1x6_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf8x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf8x6_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf4x6_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf2x6_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8m1x6_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf4x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf4x6_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf2x6_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16m1x6_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32mf2x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32mf2x6_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32m1x6_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u64m1x6_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u64m1x6_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf8x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf4x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32mf2x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u64m1x6_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u64m1x6_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf8x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf4x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32mf2x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u64m1x6_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_f64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_f64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i8m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_i64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_i64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf8x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u8m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u8m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf4x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u16m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32mf2x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u32m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_v_u64m1x6_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei8\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei16.c
index 9046030a3..f1c7163d6 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei16.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf2x7_tu(vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8m1x7_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf4x7_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf2x7_tu(vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16m1x7_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32mf2x7_tu(vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32m1x7_tu(vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i64m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf8x7_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf4x7_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf2x7_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16mf4x7_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16mf2x7_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u16m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u32mf2x7_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u32m1x7_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u64m1x7_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_f64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i8m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_i64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const
uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - 
return __riscv_vluxseg7ei16_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - 
return __riscv_vluxseg7ei16_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t 
test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t 
test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei16_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t 
test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei16\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei32.c index 894445b95..c37f4c942 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei32.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f16mf4x7_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f16mf2x7_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, 
vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg7ei32_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vluxseg7ei32_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t 
*rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } 
-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei32_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei32_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t 
maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_f64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i8m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_i64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u8m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_v_u64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei64.c
index 423825298..8fe8fb25f 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei64.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei64.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i64m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf8x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u64m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u64m1x7_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_f64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i8m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_i64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u8m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u16m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u32m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_v_u64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei8.c
index 8d96aa9e9..72a335d1d 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg7ei8.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_f16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_f16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_f16m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_f32mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_f32m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_f64m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_i8mf8x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_i8mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_i8mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_i8mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_i8m1x7_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_i8m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_i16mf4x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei8_v_i16mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei8_v_i16mf2x7_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd, const
int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf8x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf4x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf4x7_tu(vd, rs1, rs2, vl); 
} -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32mf2x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u64m1x7_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t 
test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei8_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf8x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t 
maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf4x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32mf2x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u64m1x7_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t 
bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf4x7_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf8x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const 
uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf4x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32mf2x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u64m1x7_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei8_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_f64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, 
vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_i64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf8x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u8m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf4x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u16m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32mf2x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u32m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_v_u64m1x7_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei8_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei16.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei16.c index 7f04de31c..923db495f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei16.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei16_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl); 
+vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t 
test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u64m1x8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f32m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f64m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i32m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i64m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u32m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u64m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f16m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f32m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_f64m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i8m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i16m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i32m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_i64m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u8m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u16m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u32m1x8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei16_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_v_u64m1x8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei32.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei32.c
index dab87153c..64538b5d9 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei32.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei32.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16mf4x8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16mf2x8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16m1x8_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f32mf2x8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f32m1x8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f64m1x8_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf8x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf8x8_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf4x8_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf2x8_tu(vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8m1x8_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16mf4x8_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16mf2x8_tu(vd, rs1, rs2, vl);
 }
-vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16m1x8_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i32mf2x8_tu(vd, rs1, rs2, vl);
 }
-vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i32m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i32m1x8_tu(vd, rs1, rs2, vl);
 }
-vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i64m1x8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf8x8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf4x8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf2x8_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8m1x8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16mf4x8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16mf2x8_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16m1x8_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u32mf2x8_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u32m1x8_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u64m1x8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f32m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f64m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i32m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i64m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u32m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u64m1x8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base,
vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei32_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t 
vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u32mf2x8_mu(vm, 
vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei64.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei64.c index 00094ff2e..6ee34e6e6 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei64.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei64.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t 
bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32m1x8_tu(maskedoff_tuple, 
base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t 
test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei64_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, 
vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei64_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei64_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); 
+vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, 
vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, 
const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } 
-vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, 
size_t vl) { - return __riscv_vluxseg8ei64_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei64\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei8.c b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei8.c index d86f97c74..393078bfa 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei8.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vluxseg8ei8.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t 
bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei8_v_i8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf8x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t 
test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf4x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32mf2x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u64m1x8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf2x8_tum(mask, maskedoff_tuple, base, bindex, 
vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t 
rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf8x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t 
test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf4x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32mf2x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u64m1x8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, 
vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei8_v_i8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf8x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t 
test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf4x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32mf2x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const 
uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u64m1x8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_f64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei8_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_i32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, 
size_t vl) { - return __riscv_vluxseg8ei8_v_i64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf8x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u8m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf4x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u16m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32mf2x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t 
test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u32m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_v_u64m1x8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmacc.c b/auto-generated/policy_funcs/gnu-api-tests/vmacc.c index 305d4730a..00dd00a9c 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmacc.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmacc.c @@ -358,1060 +358,1060 @@ vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2 return __riscv_vmacc_vx_u64m8_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8mf8_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8mf8_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8mf4_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8mf4_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8mf2_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, 
int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8mf2_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m1_tum(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m1_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m1_tum(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m1_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m2_tum(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m2_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m2_tum(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m2_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m4_tum(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m4_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m4_tum(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m4_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i8m8_tum(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i8m8_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i8m8_tum(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i8m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return 
__riscv_vmacc_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m1_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m1_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m2_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m2_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m4_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m4_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i16m8_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i16m8_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t 
vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m1_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m1_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m2_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m2_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m4_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m4_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i32m8_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i32m8_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m1_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t 
mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m1_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m2_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m2_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m4_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m4_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_i64m8_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_i64m8_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8mf8_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u8mf8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u8mf4_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); 
+vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8mf2_tum(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8mf2_tum(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m1_tum(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m1_tum(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m2_tum(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m2_tum(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m4_tum(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m4_tum(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m8_tum(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m8_tum(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16mf4_tum(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16mf4_tum(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16mf2_tum(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16mf2_tum(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m1_tum(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m1_tum(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m2_tum(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m2_tum(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m4_tum(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m4_tum(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m8_tum(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m8_tum(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32mf2_tum(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32mf2_tum(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m1_tum(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m1_tum(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m2_tum(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m2_tum(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m4_tum(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m4_tum(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m8_tum(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m8_tum(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m1_tum(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m1_tum(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m2_tum(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m2_tum(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m4_tum(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m4_tum(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m8_tum(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m8_tum(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m1_tumu(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m1_tumu(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m2_tumu(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m2_tumu(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m4_tumu(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m4_tumu(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m8_tumu(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m8_tumu(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m1_tumu(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m1_tumu(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m2_tumu(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m2_tumu(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m4_tumu(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m4_tumu(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m8_tumu(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m8_tumu(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m1_tumu(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m1_tumu(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m2_tumu(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m2_tumu(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m4_tumu(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m4_tumu(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m8_tumu(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m8_tumu(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m1_tumu(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m1_tumu(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m2_tumu(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m2_tumu(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m4_tumu(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m4_tumu(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m8_tumu(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m8_tumu(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m1_tumu(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m1_tumu(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m2_tumu(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m2_tumu(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m4_tumu(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m4_tumu(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m8_tumu(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m8_tumu(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m1_tumu(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m1_tumu(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m2_tumu(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m2_tumu(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m4_tumu(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m4_tumu(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m8_tumu(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m8_tumu(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m1_tumu(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m1_tumu(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m2_tumu(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m2_tumu(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m4_tumu(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m4_tumu(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u32m8_tumu(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u32m8_tumu(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m1_tumu(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m1_tumu(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m2_tumu(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m2_tumu(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m4_tumu(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m4_tumu(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u64m8_tumu(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u64m8_tumu(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u64m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8mf8_mu(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8mf8_mu(vm, vd, vs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8mf8_mu(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8mf8_mu(vm, vd, rs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8mf4_mu(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8mf4_mu(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8mf4_mu(vm, vd, rs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8mf2_mu(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8mf2_mu(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m1_mu(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m1_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m1_mu(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m1_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m2_mu(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m2_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m2_mu(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m2_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m4_mu(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m4_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m4_mu(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m4_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i8m8_mu(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i8m8_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i8m8_mu(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i8m8_mu(vm, vd, rs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16mf4_mu(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16mf4_mu(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16mf4_mu(vm, vd, rs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16mf2_mu(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16mf2_mu(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m1_mu(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m1_mu(vm, vd, vs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m1_mu(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m1_mu(vm, vd, rs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m2_mu(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m2_mu(vm, vd, vs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m2_mu(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m2_mu(vm, vd, rs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m4_mu(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m4_mu(vm, vd, vs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m4_mu(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m4_mu(vm, vd, rs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i16m8_mu(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i16m8_mu(vm, vd, vs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i16m8_mu(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i16m8_mu(vm, vd, rs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32mf2_mu(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32mf2_mu(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m1_mu(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m1_mu(vm, vd, vs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m1_mu(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m1_mu(vm, vd, rs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m2_mu(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m2_mu(vm, vd, vs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m2_mu(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m2_mu(vm, vd, rs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m4_mu(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m4_mu(vm, vd, vs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m4_mu(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m4_mu(vm, vd, rs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i32m8_mu(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i32m8_mu(vm, vd, vs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i32m8_mu(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i32m8_mu(vm, vd, rs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m1_mu(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m1_mu(vm, vd, vs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m1_mu(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m1_mu(vm, vd, rs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m2_mu(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m2_mu(vm, vd, vs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m2_mu(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m2_mu(vm, vd, rs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m4_mu(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m4_mu(vm, vd, vs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m4_mu(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m4_mu(vm, vd, rs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_i64m8_mu(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_i64m8_mu(vm, vd, vs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_i64m8_mu(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_i64m8_mu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8mf8_mu(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8mf8_mu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8mf8_mu(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf8_mu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8mf4_mu(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8mf4_mu(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf4_mu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8mf2_mu(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8mf2_mu(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m1_mu(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m1_mu(vm, vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m1_mu(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m1_mu(vm, vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m2_mu(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m2_mu(vm, vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m2_mu(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m2_mu(vm, vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m4_mu(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m4_mu(vm, vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m4_mu(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m4_mu(vm, vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u8m8_mu(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u8m8_mu(vm, vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u8m8_mu(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u8m8_mu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16mf4_mu(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16mf4_mu(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16mf4_mu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16mf2_mu(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16mf2_mu(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m1_mu(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m1_mu(vm, vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m1_mu(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m1_mu(vm, vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m2_mu(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m2_mu(vm, vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m2_mu(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vmacc_vx_u16m2_mu(vm, vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vv_u16m4_mu(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vmacc_vv_u16m4_mu(vm, vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vmacc_vx_u16m4_mu(mask, vd,
rs1, vs2, vl); +vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u16m8_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u16m8_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m1_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32m1_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m2_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32m2_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m4_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32m4_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t 
rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u32m8_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u32m8_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m1_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m1_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m2_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m2_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m4_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m4_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vv_u64m8_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_vx_u64m8_mu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { 
scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmadd.c b/auto-generated/policy_funcs/gnu-api-tests/vmadd.c index 47d848288..130ae3480 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmadd.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmadd.c @@ -358,1060 +358,1060 @@ vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2 return __riscv_vmadd_vx_u64m8_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf8_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf8_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf4_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf4_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf2_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf2_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m1_tum(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m1_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m1_tum(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m1_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m2_tum(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m2_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t 
mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m2_tum(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m2_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m4_tum(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m4_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m4_tum(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m4_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m8_tum(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m8_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m8_tum(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m1_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m1_tum(mask, vd, rs1, vs2, vl); +vint16m1_t 
test_vmadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m2_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m2_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m4_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m4_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m8_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m8_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m1_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m1_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m1_tum(vm, 
vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m2_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m2_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m4_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m4_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m8_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m8_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m1_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m1_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m2_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m2_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return 
__riscv_vmadd_vv_i64m4_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m4_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m8_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m8_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf8_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf4_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf4_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf2_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf2_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m1_tum(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t vm, 
vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m1_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m1_tum(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m1_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m2_tum(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m2_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m2_tum(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m2_tum(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m4_tum(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m4_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m4_tum(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m4_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m8_tum(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m8_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m8_tum(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m8_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf4_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf4_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf2_tum(vm, vd, vs1, vs2, vl); } 
-vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf2_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m1_tum(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m1_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m1_tum(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m1_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m2_tum(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m2_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m2_tum(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m2_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m4_tum(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m4_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m4_tum(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m4_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m8_tum(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m8_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m8_tum(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m8_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32mf2_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t mask, 
vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32mf2_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m1_tum(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m1_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m1_tum(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m1_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m2_tum(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m2_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m2_tum(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m2_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m4_tum(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m4_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m4_tum(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m4_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m8_tum(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m8_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m8_tum(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m8_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m1_tum(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m1_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - 
return __riscv_vmadd_vx_u64m1_tum(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m1_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m2_tum(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m2_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m2_tum(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m2_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m4_tum(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m4_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m4_tum(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m4_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m8_tum(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m8_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m8_tum(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m8_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); +vint8mf4_t 
test_vmadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m1_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m1_tumu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m2_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m2_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m4_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m4_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m8_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t mask, 
vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return 
__riscv_vmadd_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t 
test_vmadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + 
return __riscv_vmadd_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m1_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m1_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m2_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m4_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t 
test_vmadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m4_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m8_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m8_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m1_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m1_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m2_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t 
test_vmadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m4_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m4_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m8_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m8_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m1_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m1_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m2_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t 
test_vmadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m4_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m4_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m8_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m8_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m1_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t 
test_vmadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf8_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf8_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf4_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf4_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8mf2_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8mf2_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m1_mu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m1_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m1_mu(mask, vd, rs1, vs2, vl); +vint8m1_t 
test_vmadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m1_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m2_mu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m2_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m2_mu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m2_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m4_mu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m4_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m4_mu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m4_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i8m8_mu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i8m8_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i8m8_mu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i8m8_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t 
vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m1_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m1_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m2_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m2_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m4_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m4_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i16m8_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i16m8_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m1_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, 
vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m1_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m2_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m2_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m4_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m4_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i32m8_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i32m8_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m1_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m1_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m2_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t 
vl) { - return __riscv_vmadd_vx_i64m2_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m4_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m4_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_i64m8_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_i64m8_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf8_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf4_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf4_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8mf2_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, 
vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8mf2_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m1_mu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m1_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m1_mu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m1_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m2_mu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m2_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m2_mu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m2_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m4_mu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m4_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m4_mu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m4_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u8m8_mu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u8m8_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u8m8_mu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u8m8_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t 
vs2, size_t vl) { - return __riscv_vmadd_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m1_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m1_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m2_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m2_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m4_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m4_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m4_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u16m8_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u16m8_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t 
test_vmadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m1_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m1_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m2_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m2_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m4_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m4_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u32m8_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u32m8_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m1_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) 
{ + return __riscv_vmadd_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m1_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m2_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m2_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m4_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m4_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vv_u64m8_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_vx_u64m8_mu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmax.c b/auto-generated/policy_funcs/gnu-api-tests/vmax.c index e2d0705f7..8222268fe 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmax.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmax.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t 
test_vmax_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m8_tu(vd, 
vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmax_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return 
__riscv_vmax_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmax_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmax_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmax_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmax_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmax_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return 
__riscv_vmax_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmax_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmax_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmax_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vmax_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t 
op2, size_t vl) { - return __riscv_vmax_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t 
vl) { - return __riscv_vmax_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmax_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmax_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmax_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return 
__riscv_vmax_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmax_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmax_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmax_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmax_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf8_tumu(mask, maskedoff, op1, 
op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, 
vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, 
vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmax_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmax_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmax_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t vm, 
vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmax_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmax_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmax_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmax_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmax_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t vm, vint64m4_t 
vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmax_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmax_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return 
__riscv_vmax_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t 
op2, size_t vl) { - return __riscv_vmax_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmax_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmax_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmax_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmax_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmax_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmax_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmax_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32mf2_mu(mask, maskedoff, op1, 
op2, vl); +vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmax_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmax_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmax_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmax_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmax_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmax_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmax_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmax_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmax_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmax_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t 
vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmax_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmax_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmax_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmax_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vmax\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmaxu.c b/auto-generated/policy_funcs/gnu-api-tests/vmaxu.c
index 63cb62f7c..8f4a29c04 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmaxu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmaxu.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmaxu_vv_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmaxu_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_vx_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmaxu_vv_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmaxu_vv_u8mf4_tu(vd, vs2, vs1,
vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return 
__riscv_vmaxu_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vmaxu_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return 
__riscv_vmaxu_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t 
test_vmaxu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t 
test_vmaxu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, 
vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m4_tum(mask, maskedoff, op1, 
op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, 
size_t vl) { - return __riscv_vmaxu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t 
test_vmaxu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { 
+ return __riscv_vmaxu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32mf2_tumu(mask, maskedoff, op1, 
op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t 
maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf4_mu(vm, 
vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t 
mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m4_mu(vm, vd, 
vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t 
vl) { + return __riscv_vmaxu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t 
vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmaxu_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmaxu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmaxu_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vmaxu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmerge.c b/auto-generated/policy_funcs/gnu-api-tests/vmerge.c
index bd2c4b9e6..21bcdd5fa 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmerge.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmerge.c
@@ -6,416 +6,416 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i8mf8_tu(maskedoff, op1, op2, mask, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i8mf8_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i8mf8_tu(maskedoff, op1, op2, mask, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i8mf8_tu(vd, vs2, rs1, v0, vl);
 }
-vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i8mf4_tu(maskedoff, op1, op2, mask, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i8mf4_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i8mf4_tu(maskedoff, op1, op2, mask, vl);
+vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i8mf4_tu(vd, vs2, rs1, v0, vl);
 }
-vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i8mf2_tu(maskedoff, op1, op2, mask, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i8mf2_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i8mf2_tu(maskedoff, op1, op2, mask, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i8mf2_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i8m1_tu(maskedoff, op1, op2, mask, vl);
+vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i8m1_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i8m1_tu(maskedoff, op1, op2,
mask, vl); +vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m1_tu(vd, vs2, rs1, v0, vl); } -vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8m2_tu(maskedoff, op1, op2, mask, vl); +vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8m2_tu(vd, vs2, vs1, v0, vl); } -vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8m2_tu(maskedoff, op1, op2, mask, vl); +vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m2_tu(vd, vs2, rs1, v0, vl); } -vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8m4_tu(maskedoff, op1, op2, mask, vl); +vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8m4_tu(vd, vs2, vs1, v0, vl); } -vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8m4_tu(maskedoff, op1, op2, mask, vl); +vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m4_tu(vd, vs2, rs1, v0, vl); } -vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_vvm_i8m8_tu(maskedoff, op1, op2, mask, vl); +vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_vvm_i8m8_tu(vd, vs2, vs1, v0, vl); } -vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_vxm_i8m8_tu(maskedoff, op1, op2, mask, vl); +vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_vxm_i8m8_tu(vd, vs2, rs1, v0, vl); } -vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16mf4_tu(maskedoff, op1, op2, mask, vl); +vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_i16mf4_tu(vd, vs2, vs1, v0, vl); } -vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16mf4_tu(maskedoff, op1, op2, mask, vl); +vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_vxm_i16mf4_tu(vd, vs2, rs1, v0, vl); } -vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vvm_i16mf2_tu(maskedoff, op1, op2, mask, vl); +vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_i16mf2_tu(vd, vs2, vs1, v0, vl); } -vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_vxm_i16mf2_tu(maskedoff, op1, op2, mask, vl); 
+vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i16mf2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i16m1_tu(maskedoff, op1, op2, mask, vl);
+vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i16m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i16m1_tu(maskedoff, op1, op2, mask, vl);
+vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i16m1_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i16m2_tu(maskedoff, op1, op2, mask, vl);
+vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i16m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i16m2_tu(maskedoff, op1, op2, mask, vl);
+vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i16m2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i16m4_tu(maskedoff, op1, op2, mask, vl);
+vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i16m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i16m4_tu(maskedoff, op1, op2, mask, vl);
+vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i16m4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i16m8_tu(maskedoff, op1, op2, mask, vl);
+vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i16m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i16m8_tu(maskedoff, op1, op2, mask, vl);
+vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i16m8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i32mf2_tu(maskedoff, op1, op2, mask, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i32mf2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i32mf2_tu(maskedoff, op1, op2, mask, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i32mf2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i32m1_tu(maskedoff, op1, op2, mask, vl);
+vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i32m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i32m1_tu(maskedoff, op1, op2, mask, vl);
+vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i32m1_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i32m2_tu(maskedoff, op1, op2, mask, vl);
+vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i32m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i32m2_tu(maskedoff, op1, op2, mask, vl);
+vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i32m2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i32m4_tu(maskedoff, op1, op2, mask, vl);
+vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i32m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i32m4_tu(maskedoff, op1, op2, mask, vl);
+vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i32m4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i32m8_tu(maskedoff, op1, op2, mask, vl);
+vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i32m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i32m8_tu(maskedoff, op1, op2, mask, vl);
+vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i32m8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i64m1_tu(maskedoff, op1, op2, mask, vl);
+vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i64m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i64m1_tu(maskedoff, op1, op2, mask, vl);
+vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i64m1_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i64m2_tu(maskedoff, op1, op2, mask, vl);
+vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i64m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i64m2_tu(maskedoff, op1, op2, mask, vl);
+vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i64m2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i64m4_tu(maskedoff, op1, op2, mask, vl);
+vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i64m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i64m4_tu(maskedoff, op1, op2, mask, vl);
+vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i64m4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_i64m8_tu(maskedoff, op1, op2, mask, vl);
+vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_i64m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_i64m8_tu(maskedoff, op1, op2, mask, vl);
+vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_i64m8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u8mf8_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u8mf8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u8mf8_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u8mf8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u8mf4_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u8mf4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u8mf4_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u8mf4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u8mf2_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u8mf2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u8mf2_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u8mf2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u8m1_tu(maskedoff, op1, op2, mask, vl);
+vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u8m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u8m1_tu(maskedoff, op1, op2, mask, vl);
+vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u8m1_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u8m2_tu(maskedoff, op1, op2, mask, vl);
+vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u8m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u8m2_tu(maskedoff, op1, op2, mask, vl);
+vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u8m2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u8m4_tu(maskedoff, op1, op2, mask, vl);
+vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u8m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u8m4_tu(maskedoff, op1, op2, mask, vl);
+vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u8m4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u8m8_tu(maskedoff, op1, op2, mask, vl);
+vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u8m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u8m8_tu(maskedoff, op1, op2, mask, vl);
+vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u8m8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u16mf4_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u16mf4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u16mf4_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u16mf4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u16mf2_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u16mf2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u16mf2_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u16mf2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u16m1_tu(maskedoff, op1, op2, mask, vl);
+vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u16m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u16m1_tu(maskedoff, op1, op2, mask, vl);
+vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u16m1_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u16m2_tu(maskedoff, op1, op2, mask, vl);
+vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u16m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u16m2_tu(maskedoff, op1, op2, mask, vl);
+vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u16m2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u16m4_tu(maskedoff, op1, op2, mask, vl);
+vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u16m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u16m4_tu(maskedoff, op1, op2, mask, vl);
+vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u16m4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u16m8_tu(maskedoff, op1, op2, mask, vl);
+vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u16m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u16m8_tu(maskedoff, op1, op2, mask, vl);
+vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u16m8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u32mf2_tu(maskedoff, op1, op2, mask, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u32mf2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u32mf2_tu(maskedoff, op1, op2, mask, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u32mf2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u32m1_tu(maskedoff, op1, op2, mask, vl);
+vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u32m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u32m1_tu(maskedoff, op1, op2, mask, vl);
+vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u32m1_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u32m2_tu(maskedoff, op1, op2, mask, vl);
+vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u32m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u32m2_tu(maskedoff, op1, op2, mask, vl);
+vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u32m2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u32m4_tu(maskedoff, op1, op2, mask, vl);
+vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u32m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u32m4_tu(maskedoff, op1, op2, mask, vl);
+vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u32m4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u32m8_tu(maskedoff, op1, op2, mask, vl);
+vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u32m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u32m8_tu(maskedoff, op1, op2, mask, vl);
+vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u32m8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u64m1_tu(maskedoff, op1, op2, mask, vl);
+vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u64m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u64m1_tu(maskedoff, op1, op2, mask, vl);
+vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u64m1_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u64m2_tu(maskedoff, op1, op2, mask, vl);
+vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u64m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u64m2_tu(maskedoff, op1, op2, mask, vl);
+vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u64m2_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u64m4_tu(maskedoff, op1, op2, mask, vl);
+vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u64m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u64m4_tu(maskedoff, op1, op2, mask, vl);
+vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u64m4_tu(vd, vs2, rs1, v0, vl);
 }
 
-vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_u64m8_tu(maskedoff, op1, op2, mask, vl);
+vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_u64m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vxm_u64m8_tu(maskedoff, op1, op2, mask, vl);
+vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vxm_u64m8_tu(vd, vs2, rs1, v0, vl);
 }
 
-vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f16mf4_tu(maskedoff, op1, op2, mask, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f16mf4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f16mf2_tu(maskedoff, op1, op2, mask, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f16mf2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f16m1_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f16m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f16m2_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f16m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f16m4_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f16m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f16m8_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f16m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f32mf2_tu(maskedoff, op1, op2, mask, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f32mf2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f32m1_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f32m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f32m2_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f32m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f32m4_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f32m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f32m8_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f32m8_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f64m1_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f64m1_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f64m2_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f64m2_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f64m4_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f64m4_tu(vd, vs2, vs1, v0, vl);
 }
 
-vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_vvm_f64m8_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_vvm_f64m8_tu(vd, vs2, vs1, v0, vl);
 }
 
 /* { dg-final { scan-assembler-times {vmerge\.[ivxfswum.]+\s+} 103 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmfeq.c b/auto-generated/policy_funcs/gnu-api-tests/vmfeq.c
index b70e9d509..79a34aabf 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmfeq.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmfeq.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfeq_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfeq_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfeq_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
  return __riscv_vmfeq_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfeq\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmfge.c b/auto-generated/policy_funcs/gnu-api-tests/vmfge.c
index b8b5ae912..a7b08c995 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmfge.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmfge.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfge_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfge_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfge\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmfgt.c b/auto-generated/policy_funcs/gnu-api-tests/vmfgt.c
index a043e1116..b3a13171a 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmfgt.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmfgt.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
  return __riscv_vmfgt_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
  return __riscv_vmfgt_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfgt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfgt_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfgt_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfgt\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmfle.c b/auto-generated/policy_funcs/gnu-api-tests/vmfle.c
index cee0736ef..795b11369 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmfle.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmfle.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
  return __riscv_vmfle_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
  return __riscv_vmfle_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
  return __riscv_vmfle_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfle_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfle_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return
__riscv_vmfle_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfle_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfle_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfle_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfle_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfle_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfle_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfle\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmflt.c b/auto-generated/policy_funcs/gnu-api-tests/vmflt.c index 3b1c9ac93..878b62ec5 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmflt.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmflt.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t 
test_vmflt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmflt_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmflt_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmflt_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmflt_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmflt_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmflt_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmflt_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t 
test_vmflt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmflt_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmflt_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmflt_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmflt_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmflt_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmflt_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t 
maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmflt_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmflt_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmflt_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmflt_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + 
return __riscv_vmflt_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmflt\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmfne.c b/auto-generated/policy_funcs/gnu-api-tests/vmfne.c index 1a918aa4a..7195e9399 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmfne.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmfne.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfne_vv_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfne_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_vf_f16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfne_vv_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfne_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_vf_f16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfne_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfne_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfne_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfne_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } 
-vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfne_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfne_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfne_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmfne_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfne_vv_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfne_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_vf_f32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfne_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfne_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfne_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t vm, 
vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfne_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfne_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfne_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfne_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfne_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfne_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfne_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfne_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfne_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - 
return __riscv_vmfne_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfne_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfne_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfne_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfne\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmin.c b/auto-generated/policy_funcs/gnu-api-tests/vmin.c index 004290a42..ba085c4c0 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmin.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmin.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t 
test_vmin_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - 
return __riscv_vmin_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmin_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmin_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmin_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmin_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmin_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmin_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmin_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmin_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmin_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmin_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmin_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) 
{ + return __riscv_vmin_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmin_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmin_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmin_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmin_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmin_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmin_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmin_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmin_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmin_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmin_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return 
__riscv_vmin_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmin_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmin_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmin_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m1_tum(vm, vd, vs2, vs1, 
vl); } -vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t 
op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmin_vx_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmin_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmin_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmin_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmin\.[ivxfswum.]+\s+} 176 } } */
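Note: the change is purely mechanical. The old descriptive parameter names `mask`, `maskedoff`, `op1`, and `op2` become `vm`, `vd`, `vs2`, and `vs1` (or `rs1` in the vector-scalar forms), mirroring the operand names of the underlying `vmin.vv`/`vmin.vx` instructions. A minimal sketch of a caller written against the new spellings follows; it assumes a compiler that provides the v1.0 intrinsics via <riscv_vector.h>, and the helper name `clamp_active` is hypothetical, not part of the generated tests.

#include <riscv_vector.h>

// Tail-undisturbed, masked signed minimum: active elements of vs2 are
// clamped to at most rs1; elements past vl keep their old values from vd.
// Old spelling: __riscv_vmin_vx_i32m1_tum(mask, maskedoff, op1, op2, vl)
vint32m1_t clamp_active(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
                        int32_t rs1, size_t vl) {
  return __riscv_vmin_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
}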
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vminu.c b/auto-generated/policy_funcs/gnu-api-tests/vminu.c
index b4b2c77c5..379a19dc0 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vminu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vminu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8mf2_tu(vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m1_tu(vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m2_tu(vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m2_tu(vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m4_tu(vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m4_tu(vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m8_tu(vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m8_tu(vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m1_tu(vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m2_tu(vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m2_tu(vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m4_tu(vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m4_tu(vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m8_tu(vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m8_tu(vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32mf2_tu(vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m1_tu(vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m2_tu(vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m2_tu(vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m4_tu(vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m4_tu(vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m8_tu(vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m8_tu(vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m1_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m2_tu(vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m2_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m4_tu(vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m4_tu(vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m8_tu(vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m8_tu(vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vminu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vminu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vminu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vminu_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vminu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vminu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t
vl) { - return __riscv_vminu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, 
vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return 
__riscv_vminu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_vv_u32m2_tumu(mask, maskedoff, op1, op2, 
vl); +vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, 
vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t 
test_vminu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t 
maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t 
test_vminu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vminu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vminu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsbf.c b/auto-generated/policy_funcs/gnu-api-tests/vmsbf.c index 4048aa9ce..ca1b87662 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsbf.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsbf.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsbf_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_vmsbf_m_b1_mu(mask, maskedoff, op1, vl); +vbool1_t test_vmsbf_m_b1_mu(vbool1_t vm, vbool1_t vd, 
vbool1_t vs2, size_t vl) { + return __riscv_vmsbf_m_b1_mu(vm, vd, vs2, vl); } -vbool2_t test_vmsbf_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_vmsbf_m_b2_mu(mask, maskedoff, op1, vl); +vbool2_t test_vmsbf_m_b2_mu(vbool2_t vm, vbool2_t vd, vbool2_t vs2, size_t vl) { + return __riscv_vmsbf_m_b2_mu(vm, vd, vs2, vl); } -vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_vmsbf_m_b4_mu(mask, maskedoff, op1, vl); +vbool4_t test_vmsbf_m_b4_mu(vbool4_t vm, vbool4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_vmsbf_m_b4_mu(vm, vd, vs2, vl); } -vbool8_t test_vmsbf_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_vmsbf_m_b8_mu(mask, maskedoff, op1, vl); +vbool8_t test_vmsbf_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_vmsbf_m_b8_mu(vm, vd, vs2, vl); } -vbool16_t test_vmsbf_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_vmsbf_m_b16_mu(mask, maskedoff, op1, vl); +vbool16_t test_vmsbf_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) { + return __riscv_vmsbf_m_b16_mu(vm, vd, vs2, vl); } -vbool32_t test_vmsbf_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_vmsbf_m_b32_mu(mask, maskedoff, op1, vl); +vbool32_t test_vmsbf_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) { + return __riscv_vmsbf_m_b32_mu(vm, vd, vs2, vl); } -vbool64_t test_vmsbf_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_vmsbf_m_b64_mu(mask, maskedoff, op1, vl); +vbool64_t test_vmsbf_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) { + return __riscv_vmsbf_m_b64_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsbf\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmseq.c b/auto-generated/policy_funcs/gnu-api-tests/vmseq.c index f021b9643..a1adf371f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmseq.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmseq.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmseq_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmseq_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return 
__riscv_vmseq_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); 
+vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmseq_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return 
__riscv_vmseq_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, 
vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t 
test_vmseq_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmseq_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmseq_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return 
__riscv_vmseq_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmseq_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, 
vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmseq_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, 
vl); +vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, 
size_t vl) { - return __riscv_vmseq_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmseq_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmseq\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsge.c b/auto-generated/policy_funcs/gnu-api-tests/vmsge.c index 987275a8b..9dee76481 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsge.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsge.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return 
__riscv_vmsge_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } 
-vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsge_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + 
return __riscv_vmsge_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsge_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t 
vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t 
test_vmsge_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsge_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsgeu.c b/auto-generated/policy_funcs/gnu-api-tests/vmsgeu.c index decd1207a..27688b1be 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsgeu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsgeu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t 
test_vmsgeu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return 
__riscv_vmsgeu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t 
test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, 
vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgeu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsgt.c b/auto-generated/policy_funcs/gnu-api-tests/vmsgt.c index b60b14d0d..0c4934fcf 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsgt.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsgt.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t vm, 
vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, 
vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t 
test_vmsgt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m2_b16_mu(mask, 
maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return 
__riscv_vmsgt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsgt_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsgt\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsgtu.c b/auto-generated/policy_funcs/gnu-api-tests/vmsgtu.c index 5b734c11e..31edf23c0 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsgtu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsgtu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgtu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgtu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return 
__riscv_vmsgtu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1,
size_t vl) {
+  return __riscv_vmsgtu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return
__riscv_vmsgtu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsgtu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsgtu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsgtu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t vm,
vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsgtu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmsgtu\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsif.c b/auto-generated/policy_funcs/gnu-api-tests/vmsif.c
index efa8af17c..b67db5832 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmsif.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmsif.c
@@ -6,32 +6,32 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool1_t test_vmsif_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) {
-  return __riscv_vmsif_m_b1_mu(mask, maskedoff, op1, vl);
+vbool1_t test_vmsif_m_b1_mu(vbool1_t vm, vbool1_t vd, vbool1_t vs2, size_t vl) {
+  return __riscv_vmsif_m_b1_mu(vm, vd, vs2, vl);
 }

-vbool2_t test_vmsif_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) {
-  return __riscv_vmsif_m_b2_mu(mask, maskedoff, op1, vl);
+vbool2_t test_vmsif_m_b2_mu(vbool2_t vm, vbool2_t vd, vbool2_t vs2, size_t vl) {
+  return __riscv_vmsif_m_b2_mu(vm, vd, vs2, vl);
 }

-vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) {
-  return __riscv_vmsif_m_b4_mu(mask, maskedoff, op1, vl);
+vbool4_t test_vmsif_m_b4_mu(vbool4_t vm, vbool4_t vd, vbool4_t vs2, size_t vl) {
+  return __riscv_vmsif_m_b4_mu(vm, vd, vs2, vl);
 }

-vbool8_t test_vmsif_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) {
-  return __riscv_vmsif_m_b8_mu(mask, maskedoff, op1, vl);
+vbool8_t test_vmsif_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) {
+  return __riscv_vmsif_m_b8_mu(vm, vd, vs2, vl);
 }

-vbool16_t test_vmsif_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) {
-  return __riscv_vmsif_m_b16_mu(mask, maskedoff, op1, vl);
+vbool16_t test_vmsif_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) {
+  return __riscv_vmsif_m_b16_mu(vm, vd, vs2, vl);
 }

-vbool32_t test_vmsif_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) {
-  return __riscv_vmsif_m_b32_mu(mask, maskedoff, op1, vl);
+vbool32_t test_vmsif_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) {
+  return __riscv_vmsif_m_b32_mu(vm, vd, vs2, vl);
 }

-vbool64_t test_vmsif_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) {
-  return __riscv_vmsif_m_b64_mu(mask, maskedoff, op1, vl);
+vbool64_t test_vmsif_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) {
+  return __riscv_vmsif_m_b64_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsif\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsle.c b/auto-generated/policy_funcs/gnu-api-tests/vmsle.c
index 474002510..111b54f18 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmsle.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmsle.c
@@ -6,180 +6,180 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return
__riscv_vmsle_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff,
vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t
test_vmsle_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return
__riscv_vmsle_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmsle_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmsle_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmsle\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsleu.c b/auto-generated/policy_funcs/gnu-api-tests/vmsleu.c
index 81ff9c8a0..b4036cf7f 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmsleu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmsleu.c
@@ -6,180 +6,180 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return
__riscv_vmsleu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2,
size_t vl) {
-  return __riscv_vmsleu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u16m8_b2_mu(vm, vd,
vs2, vs1, vl);
 }

-vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t
test_vmsleu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsleu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmsleu\.[ivxfswum.]+\s+} 44 } } */
diff --git
a/auto-generated/policy_funcs/gnu-api-tests/vmslt.c b/auto-generated/policy_funcs/gnu-api-tests/vmslt.c
index a39bd93da..b2780394b 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmslt.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmslt.c
@@ -6,180 +6,180 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m2_b4_mu(vm, vd, vs2, vs1,
vl);
 }

-vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m1_b16_mu(vm,
vd, vs2, vs1, vl);
 }

-vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmslt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return
__riscv_vmslt_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmslt_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmslt_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmslt_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmslt_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmslt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmslt_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmslt_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmslt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmslt_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmslt_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmslt_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmslt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmslt_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, 
vint64m2_t vs1, size_t vl) { + return __riscv_vmslt_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmslt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmslt_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmslt_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmslt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmslt_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmslt_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmslt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmslt_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsltu.c b/auto-generated/policy_funcs/gnu-api-tests/vmsltu.c index 909583f6b..9af7de785 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsltu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsltu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t 
vl) { - return __riscv_vmsltu_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, 
uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return 
__riscv_vmsltu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32m4_b8_mu(mask, maskedoff, op1, 
op2, vl); +vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t 
maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsltu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsltu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsne.c b/auto-generated/policy_funcs/gnu-api-tests/vmsne.c index d5b59076b..5bef7ffd1 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsne.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsne.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsne_vv_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t 
vl) { + return __riscv_vmsne_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsne_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return 
__riscv_vmsne_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, 
vint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t 
test_vmsne_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf8_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return 
__riscv_vmsne_vv_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf4_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8mf2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return 
__riscv_vmsne_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsne_vv_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t 
test_vmsne_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsne_vv_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32mf2_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t 
rs1, size_t vl) { + return __riscv_vmsne_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl); +vbool16_t 
test_vmsne_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsne_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsne\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmsof.c b/auto-generated/policy_funcs/gnu-api-tests/vmsof.c index 8e1301b2f..99f28d6cc 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmsof.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmsof.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsof_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_vmsof_m_b1_mu(mask, maskedoff, op1, vl); +vbool1_t test_vmsof_m_b1_mu(vbool1_t vm, vbool1_t vd, vbool1_t vs2, size_t vl) { + return __riscv_vmsof_m_b1_mu(vm, vd, vs2, vl); } -vbool2_t test_vmsof_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_vmsof_m_b2_mu(mask, maskedoff, op1, vl); +vbool2_t test_vmsof_m_b2_mu(vbool2_t vm, vbool2_t vd, vbool2_t vs2, size_t vl) { + return __riscv_vmsof_m_b2_mu(vm, vd, vs2, vl); } -vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_vmsof_m_b4_mu(mask, maskedoff, op1, vl); +vbool4_t test_vmsof_m_b4_mu(vbool4_t vm, vbool4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_vmsof_m_b4_mu(vm, vd, vs2, vl); } -vbool8_t test_vmsof_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_vmsof_m_b8_mu(mask, maskedoff, op1, vl); +vbool8_t test_vmsof_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_vmsof_m_b8_mu(vm, vd, vs2, vl); } -vbool16_t test_vmsof_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_vmsof_m_b16_mu(mask, maskedoff, op1, vl); +vbool16_t test_vmsof_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) { + return __riscv_vmsof_m_b16_mu(vm, vd, vs2, vl); } -vbool32_t test_vmsof_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_vmsof_m_b32_mu(mask, maskedoff, op1, vl); +vbool32_t test_vmsof_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) { + return __riscv_vmsof_m_b32_mu(vm, vd, vs2, vl); } -vbool64_t test_vmsof_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_vmsof_m_b64_mu(mask, maskedoff, op1, vl); +vbool64_t test_vmsof_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) { + return __riscv_vmsof_m_b64_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsof\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmul.c 
b/auto-generated/policy_funcs/gnu-api-tests/vmul.c index 69097b6c8..917e1e963 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmul.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmul.c @@ -6,1412 +6,1412 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) 
{ + return __riscv_vmul_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return 
__riscv_vmul_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vmul_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vmul_vx_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return 
__riscv_vmul_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t 
test_vmul_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, 
uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, 
vint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t 
vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, 
vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t 
vs1, size_t vl) { + return __riscv_vmul_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return 
__riscv_vmul_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return 
__riscv_vmul_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { 
+ return __riscv_vmul_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, 
vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t 
test_vmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); 
+vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m1_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmul_vx_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmul_vx_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmul_vx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmul_vx_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmul_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmul\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmulh.c b/auto-generated/policy_funcs/gnu-api-tests/vmulh.c
index 05ee5c860..15b07100f 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmulh.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmulh.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8mf2_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8mf2_tu(vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8mf2_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8m1_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8m1_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8m1_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8m2_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8m2_tu(vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8m2_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8m2_tu(vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8m4_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8m4_tu(vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8m4_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8m4_tu(vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i8m8_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i8m8_tu(vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i8m8_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i8m8_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i16mf4_tu(vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i16mf4_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i16mf2_tu(vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i16mf2_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i16m1_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i16m2_tu(vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i16m2_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i16m4_tu(vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i16m4_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i16m8_tu(vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i16m8_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i32mf2_tu(vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i32mf2_tu(vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i32m1_tu(vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmulh_vv_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmulh_vv_i32m2_tu(vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_vx_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_vx_i32m2_tu(vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t
op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t 
test_vmulh_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, 
size_t vl) { + return __riscv_vmulh_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + 
return __riscv_vmulh_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t 
vl) { + return __riscv_vmulh_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + 
return __riscv_vmulh_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t 
vl) { + return __riscv_vmulh_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf4_tumu(vm, vd, 
vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { 
+ return __riscv_vmulh_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t vm, 
vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t 
test_vmulh_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m4_mu(vm, 
vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t 
test_vmulh_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t 
test_vmulh_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t mask, vint64m8_t 
maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulh\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmulhsu.c b/auto-generated/policy_funcs/gnu-api-tests/vmulhsu.c index ce300f65c..d7b44507f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmulhsu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmulhsu.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m1_tu(maskedoff, op1, op2, vl); 
+vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t vd, 
vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t vd, 
vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, 
vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return 
__riscv_vmulhsu_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, 
vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32mf2_tum(mask, 
maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t mask, 
vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vmulhsu_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, 
vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t 
vl) { - return __riscv_vmulhsu_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return 
__riscv_vmulhsu_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m4_tumu(mask, maskedoff, op1, op2, 
vl); +vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t 
vl) { - return __riscv_vmulhsu_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return 
__riscv_vmulhsu_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t 
maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i32m8_mu(vm, vd, vs2, rs1, vl); 
} -vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhsu_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhsu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmulhu.c b/auto-generated/policy_funcs/gnu-api-tests/vmulhu.c index c212e0d31..2df60f1b9 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vmulhu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vmulhu.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return 
__riscv_vmulhu_vv_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t 
test_vmulhu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m4_tu(maskedoff, op1, op2, vl); 
+vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vmulhu_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t 
vl) { - return __riscv_vmulhu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t 
maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vmulhu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m1_tum(mask, maskedoff, op1, op2, 
vl); +vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, 
vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return 
__riscv_vmulhu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, 
vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, 
vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t 
vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - 
return __riscv_vmulhu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, 
uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t 
test_vmulhu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, 
size_t vl) { + return __riscv_vmulhu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t 
test_vmulhu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+ return __riscv_vmulhu_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
}

-vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return __riscv_vmulhu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vmulhu_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
}

-vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return __riscv_vmulhu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+ return __riscv_vmulhu_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
}

-vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return __riscv_vmulhu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vmulhu_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
}

-vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return __riscv_vmulhu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+ return __riscv_vmulhu_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
}

-vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return __riscv_vmulhu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vmulhu_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
}

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vmv.c b/auto-generated/policy_funcs/gnu-api-tests/vmv.c
index 39a2ca340..5b8ea38f5 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vmv.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vmv.c
@@ -6,592 +6,592 @@ typedef _Float16 float16_t;
typedef float float32_t;
typedef double float64_t;

-vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) {
- return __riscv_vmv_v_v_i8mf8_tu(maskedoff, src, vl);
+vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vmv_v_v_i8mf8_tu(vd, vs1, vl);
}

-vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) {
- return __riscv_vmv_v_x_i8mf8_tu(maskedoff, src, vl);
+vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t vd, int8_t rs1, size_t vl) {
+ return __riscv_vmv_v_x_i8mf8_tu(vd, rs1, vl);
}

-vint8mf4_t test_vmv_v_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) {
- return __riscv_vmv_v_v_i8mf4_tu(maskedoff, src, vl);
+vint8mf4_t test_vmv_v_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vmv_v_v_i8mf4_tu(vd, vs1, vl);
}

-vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) {
- return __riscv_vmv_v_x_i8mf4_tu(maskedoff, src, vl);
+vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t vd, int8_t rs1, size_t vl) {
+ return __riscv_vmv_v_x_i8mf4_tu(vd, rs1, vl);
}

-vint8mf2_t
test_vmv_v_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vmv_v_v_i8mf2_tu(maskedoff, src, vl); +vint8mf2_t test_vmv_v_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i8mf2_tu(vd, vs1, vl); } -vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_v_x_i8mf2_tu(maskedoff, src, vl); +vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_v_x_i8mf2_tu(vd, rs1, vl); } -vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vmv_v_v_i8m1_tu(maskedoff, src, vl); +vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_i8m1_tu(vd, vs1, vl); } -vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_v_x_i8m1_tu(maskedoff, src, vl); +vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_v_x_i8m1_tu(vd, rs1, vl); } -vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vmv_v_v_i8m2_tu(maskedoff, src, vl); +vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i8m2_tu(vd, vs1, vl); } -vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_v_x_i8m2_tu(maskedoff, src, vl); +vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_v_x_i8m2_tu(vd, rs1, vl); } -vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vmv_v_v_i8m4_tu(maskedoff, src, vl); +vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i8m4_tu(vd, vs1, vl); } -vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_v_x_i8m4_tu(maskedoff, src, vl); +vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_v_x_i8m4_tu(vd, rs1, vl); } -vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t vl) { - return __riscv_vmv_v_v_i8m8_tu(maskedoff, src, vl); +vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_i8m8_tu(vd, vs1, vl); } -vint8m8_t test_vmv_v_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_v_x_i8m8_tu(maskedoff, src, vl); +vint8m8_t test_vmv_v_x_i8m8_tu(vint8m8_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_v_x_i8m8_tu(vd, rs1, vl); } -vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vmv_v_v_i16mf4_tu(maskedoff, src, vl); +vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16mf4_tu(vd, vs1, vl); } -vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16mf4_tu(maskedoff, src, vl); +vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16mf4_tu(vd, rs1, vl); } -vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vmv_v_v_i16mf2_tu(maskedoff, src, vl); +vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16mf2_tu(vd, vs1, vl); } -vint16mf2_t test_vmv_v_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16mf2_tu(maskedoff, src, vl); +vint16mf2_t 
test_vmv_v_x_i16mf2_tu(vint16mf2_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16mf2_tu(vd, rs1, vl); } -vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vmv_v_v_i16m1_tu(maskedoff, src, vl); +vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m1_tu(vd, vs1, vl); } -vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m1_tu(maskedoff, src, vl); +vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m1_tu(vd, rs1, vl); } -vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vmv_v_v_i16m2_tu(maskedoff, src, vl); +vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m2_tu(vd, vs1, vl); } -vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m2_tu(maskedoff, src, vl); +vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m2_tu(vd, rs1, vl); } -vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vmv_v_v_i16m4_tu(maskedoff, src, vl); +vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m4_tu(vd, vs1, vl); } -vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m4_tu(maskedoff, src, vl); +vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m4_tu(vd, rs1, vl); } -vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vmv_v_v_i16m8_tu(maskedoff, src, vl); +vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_i16m8_tu(vd, vs1, vl); } -vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_v_x_i16m8_tu(maskedoff, src, vl); +vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_v_x_i16m8_tu(vd, rs1, vl); } -vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vmv_v_v_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32mf2_tu(vd, vs1, vl); } -vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32mf2_tu(vd, rs1, vl); } -vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vmv_v_v_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32m1_tu(vd, vs1, vl); } -vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m1_tu(vd, rs1, vl); } -vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vmv_v_v_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, size_t vl) { + return 
__riscv_vmv_v_v_i32m2_tu(vd, vs1, vl); } -vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m2_tu(vd, rs1, vl); } -vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vmv_v_v_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32m4_tu(vd, vs1, vl); } -vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m4_tu(vd, rs1, vl); } -vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vmv_v_v_i32m8_tu(maskedoff, src, vl); +vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_i32m8_tu(vd, vs1, vl); } -vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_v_x_i32m8_tu(maskedoff, src, vl); +vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_v_x_i32m8_tu(vd, rs1, vl); } -vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vmv_v_v_i64m1_tu(maskedoff, src, vl); +vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m1_tu(vd, vs1, vl); } -vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m1_tu(maskedoff, src, vl); +vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m1_tu(vd, rs1, vl); } -vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vmv_v_v_i64m2_tu(maskedoff, src, vl); +vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m2_tu(vd, vs1, vl); } -vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m2_tu(maskedoff, src, vl); +vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m2_tu(vd, rs1, vl); } -vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vmv_v_v_i64m4_tu(maskedoff, src, vl); +vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m4_tu(vd, vs1, vl); } -vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m4_tu(maskedoff, src, vl); +vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m4_tu(vd, rs1, vl); } -vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vmv_v_v_i64m8_tu(maskedoff, src, vl); +vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_i64m8_tu(vd, vs1, vl); } -vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_v_x_i64m8_tu(maskedoff, src, vl); +vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_v_x_i64m8_tu(vd, rs1, vl); } -vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t 
src, size_t vl) { - return __riscv_vmv_v_v_u8mf8_tu(maskedoff, src, vl); +vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8mf8_tu(vd, vs1, vl); } -vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8mf8_tu(maskedoff, src, vl); +vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8mf8_tu(vd, rs1, vl); } -vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vmv_v_v_u8mf4_tu(maskedoff, src, vl); +vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8mf4_tu(vd, vs1, vl); } -vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8mf4_tu(maskedoff, src, vl); +vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8mf4_tu(vd, rs1, vl); } -vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vmv_v_v_u8mf2_tu(maskedoff, src, vl); +vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8mf2_tu(vd, vs1, vl); } -vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8mf2_tu(maskedoff, src, vl); +vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8mf2_tu(vd, rs1, vl); } -vuint8m1_t test_vmv_v_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vmv_v_v_u8m1_tu(maskedoff, src, vl); +vuint8m1_t test_vmv_v_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8m1_tu(vd, vs1, vl); } -vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m1_tu(maskedoff, src, vl); +vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m1_tu(vd, rs1, vl); } -vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vmv_v_v_u8m2_tu(maskedoff, src, vl); +vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8m2_tu(vd, vs1, vl); } -vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m2_tu(maskedoff, src, vl); +vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m2_tu(vd, rs1, vl); } -vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vmv_v_v_u8m4_tu(maskedoff, src, vl); +vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8m4_tu(vd, vs1, vl); } -vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m4_tu(maskedoff, src, vl); +vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m4_tu(vd, rs1, vl); } -vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) { - return __riscv_vmv_v_v_u8m8_tu(maskedoff, src, vl); +vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u8m8_tu(vd, vs1, vl); } -vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_v_x_u8m8_tu(maskedoff, src, vl); +vuint8m8_t 
test_vmv_v_x_u8m8_tu(vuint8m8_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_v_x_u8m8_tu(vd, rs1, vl); } -vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vmv_v_v_u16mf4_tu(maskedoff, src, vl); +vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16mf4_tu(vd, vs1, vl); } -vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16mf4_tu(maskedoff, src, vl); +vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16mf4_tu(vd, rs1, vl); } -vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vmv_v_v_u16mf2_tu(maskedoff, src, vl); +vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16mf2_tu(vd, vs1, vl); } -vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16mf2_tu(maskedoff, src, vl); +vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16mf2_tu(vd, rs1, vl); } -vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vmv_v_v_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m1_tu(vd, vs1, vl); } -vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m1_tu(vd, rs1, vl); } -vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vmv_v_v_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m2_tu(vd, vs1, vl); } -vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m2_tu(vd, rs1, vl); } -vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vmv_v_v_u16m4_tu(maskedoff, src, vl); +vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m4_tu(vd, vs1, vl); } -vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m4_tu(maskedoff, src, vl); +vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m4_tu(vd, rs1, vl); } -vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vmv_v_v_u16m8_tu(maskedoff, src, vl); +vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u16m8_tu(vd, vs1, vl); } -vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_v_x_u16m8_tu(maskedoff, src, vl); +vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_v_x_u16m8_tu(vd, rs1, vl); } -vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vmv_v_v_u32mf2_tu(maskedoff, src, vl); 
+vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32mf2_tu(vd, vs1, vl); } -vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32mf2_tu(vd, rs1, vl); } -vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vmv_v_v_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m1_tu(vd, vs1, vl); } -vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t vd, uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m1_tu(vd, rs1, vl); } -vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vmv_v_v_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m2_tu(vd, vs1, vl); } -vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t vd, uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m2_tu(vd, rs1, vl); } -vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vmv_v_v_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m4_tu(vd, vs1, vl); } -vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t vd, uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m4_tu(vd, rs1, vl); } -vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vmv_v_v_u32m8_tu(maskedoff, src, vl); +vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u32m8_tu(vd, vs1, vl); } -vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) { - return __riscv_vmv_v_x_u32m8_tu(maskedoff, src, vl); +vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t vd, uint32_t rs1, size_t vl) { + return __riscv_vmv_v_x_u32m8_tu(vd, rs1, vl); } -vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vmv_v_v_u64m1_tu(maskedoff, src, vl); +vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m1_tu(vd, vs1, vl); } -vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m1_tu(maskedoff, src, vl); +vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t vd, uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m1_tu(vd, rs1, vl); } -vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vmv_v_v_u64m2_tu(maskedoff, src, vl); +vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m2_tu(vd, vs1, vl); } -vuint64m2_t test_vmv_v_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m2_tu(maskedoff, src, vl); +vuint64m2_t 
test_vmv_v_x_u64m2_tu(vuint64m2_t vd, uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m2_tu(vd, rs1, vl); } -vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vmv_v_v_u64m4_tu(maskedoff, src, vl); +vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m4_tu(vd, vs1, vl); } -vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m4_tu(maskedoff, src, vl); +vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t vd, uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m4_tu(vd, rs1, vl); } -vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vmv_v_v_u64m8_tu(maskedoff, src, vl); +vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_u64m8_tu(vd, vs1, vl); } -vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) { - return __riscv_vmv_v_x_u64m8_tu(maskedoff, src, vl); +vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t vd, uint64_t rs1, size_t vl) { + return __riscv_vmv_v_x_u64m8_tu(vd, rs1, vl); } -vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vmv_v_v_f16mf4_tu(maskedoff, src, vl); +vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16mf4_tu(vd, vs1, vl); } -vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vmv_v_v_f16mf2_tu(maskedoff, src, vl); +vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16mf2_tu(vd, vs1, vl); } -vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vmv_v_v_f16m1_tu(maskedoff, src, vl); +vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m1_tu(vd, vs1, vl); } -vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vmv_v_v_f16m2_tu(maskedoff, src, vl); +vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m2_tu(vd, vs1, vl); } -vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vmv_v_v_f16m4_tu(maskedoff, src, vl); +vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m4_tu(vd, vs1, vl); } -vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vmv_v_v_f16m8_tu(maskedoff, src, vl); +vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_f16m8_tu(vd, vs1, vl); } -vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vmv_v_v_f32mf2_tu(maskedoff, src, vl); +vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32mf2_tu(vd, vs1, vl); } -vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vmv_v_v_f32m1_tu(maskedoff, src, vl); +vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m1_tu(vd, vs1, vl); } -vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t 
vl) { - return __riscv_vmv_v_v_f32m2_tu(maskedoff, src, vl); +vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m2_tu(vd, vs1, vl); } -vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vmv_v_v_f32m4_tu(maskedoff, src, vl); +vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m4_tu(vd, vs1, vl); } -vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vmv_v_v_f32m8_tu(maskedoff, src, vl); +vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_f32m8_tu(vd, vs1, vl); } -vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vmv_v_v_f64m1_tu(maskedoff, src, vl); +vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m1_tu(vd, vs1, vl); } -vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vmv_v_v_f64m2_tu(maskedoff, src, vl); +vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m2_tu(vd, vs1, vl); } -vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vmv_v_v_f64m4_tu(maskedoff, src, vl); +vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m4_tu(vd, vs1, vl); } -vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vmv_v_v_f64m8_tu(maskedoff, src, vl); +vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_f64m8_tu(vd, vs1, vl); } -vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8mf8_tu(maskedoff, src, vl); +vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8mf8_tu(vd, rs1, vl); } -vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8mf4_tu(maskedoff, src, vl); +vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8mf4_tu(vd, rs1, vl); } -vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8mf2_tu(maskedoff, src, vl); +vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8mf2_tu(vd, rs1, vl); } -vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m1_tu(maskedoff, src, vl); +vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m1_tu(vd, rs1, vl); } -vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m2_tu(maskedoff, src, vl); +vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m2_tu(vd, rs1, vl); } -vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m4_tu(maskedoff, src, vl); +vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m4_tu(vd, rs1, vl); } -vint8m8_t test_vmv_s_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) { - return __riscv_vmv_s_x_i8m8_tu(maskedoff, src, vl); +vint8m8_t 
test_vmv_s_x_i8m8_tu(vint8m8_t vd, int8_t rs1, size_t vl) { + return __riscv_vmv_s_x_i8m8_tu(vd, rs1, vl); } -vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16mf4_tu(maskedoff, src, vl); +vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16mf4_tu(vd, rs1, vl); } -vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16mf2_tu(maskedoff, src, vl); +vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16mf2_tu(vd, rs1, vl); } -vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m1_tu(maskedoff, src, vl); +vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m1_tu(vd, rs1, vl); } -vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m2_tu(maskedoff, src, vl); +vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m2_tu(vd, rs1, vl); } -vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m4_tu(maskedoff, src, vl); +vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m4_tu(vd, rs1, vl); } -vint16m8_t test_vmv_s_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) { - return __riscv_vmv_s_x_i16m8_tu(maskedoff, src, vl); +vint16m8_t test_vmv_s_x_i16m8_tu(vint16m8_t vd, int16_t rs1, size_t vl) { + return __riscv_vmv_s_x_i16m8_tu(vd, rs1, vl); } -vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32mf2_tu(vd, rs1, vl); } -vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m1_tu(vd, rs1, vl); } -vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m2_tu(vd, rs1, vl); } -vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m4_tu(vd, rs1, vl); } -vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) { - return __riscv_vmv_s_x_i32m8_tu(maskedoff, src, vl); +vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t vd, int32_t rs1, size_t vl) { + return __riscv_vmv_s_x_i32m8_tu(vd, rs1, vl); } -vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m1_tu(maskedoff, src, vl); +vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m1_tu(vd, rs1, vl); } -vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m2_tu(maskedoff, src, vl); +vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m2_tu(vd, rs1, vl); } -vint64m4_t 
test_vmv_s_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m4_tu(maskedoff, src, vl); +vint64m4_t test_vmv_s_x_i64m4_tu(vint64m4_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m4_tu(vd, rs1, vl); } -vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) { - return __riscv_vmv_s_x_i64m8_tu(maskedoff, src, vl); +vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t vd, int64_t rs1, size_t vl) { + return __riscv_vmv_s_x_i64m8_tu(vd, rs1, vl); } -vuint8mf8_t test_vmv_s_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8mf8_tu(maskedoff, src, vl); +vuint8mf8_t test_vmv_s_x_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8mf8_tu(vd, rs1, vl); } -vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8mf4_tu(maskedoff, src, vl); +vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8mf4_tu(vd, rs1, vl); } -vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8mf2_tu(maskedoff, src, vl); +vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8mf2_tu(vd, rs1, vl); } -vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m1_tu(maskedoff, src, vl); +vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m1_tu(vd, rs1, vl); } -vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m2_tu(maskedoff, src, vl); +vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m2_tu(vd, rs1, vl); } -vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m4_tu(maskedoff, src, vl); +vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m4_tu(vd, rs1, vl); } -vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) { - return __riscv_vmv_s_x_u8m8_tu(maskedoff, src, vl); +vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t vd, uint8_t rs1, size_t vl) { + return __riscv_vmv_s_x_u8m8_tu(vd, rs1, vl); } -vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16mf4_tu(maskedoff, src, vl); +vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16mf4_tu(vd, rs1, vl); } -vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16mf2_tu(maskedoff, src, vl); +vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16mf2_tu(vd, rs1, vl); } -vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16m1_tu(vd, rs1, vl); } -vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) { - return __riscv_vmv_s_x_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t vd, uint16_t rs1, size_t vl) { + return __riscv_vmv_s_x_u16m2_tu(vd, rs1, vl); } -vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) { - return 
__riscv_vmv_s_x_u16m4_tu(maskedoff, src, vl);
+vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t vd, uint16_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u16m4_tu(vd, rs1, vl);
}

-vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) {
- return __riscv_vmv_s_x_u16m8_tu(maskedoff, src, vl);
+vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t vd, uint16_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u16m8_tu(vd, rs1, vl);
}

-vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) {
- return __riscv_vmv_s_x_u32mf2_tu(maskedoff, src, vl);
+vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u32mf2_tu(vd, rs1, vl);
}

-vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) {
- return __riscv_vmv_s_x_u32m1_tu(maskedoff, src, vl);
+vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t vd, uint32_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u32m1_tu(vd, rs1, vl);
}

-vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) {
- return __riscv_vmv_s_x_u32m2_tu(maskedoff, src, vl);
+vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t vd, uint32_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u32m2_tu(vd, rs1, vl);
}

-vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) {
- return __riscv_vmv_s_x_u32m4_tu(maskedoff, src, vl);
+vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t vd, uint32_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u32m4_tu(vd, rs1, vl);
}

-vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) {
- return __riscv_vmv_s_x_u32m8_tu(maskedoff, src, vl);
+vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t vd, uint32_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u32m8_tu(vd, rs1, vl);
}

-vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) {
- return __riscv_vmv_s_x_u64m1_tu(maskedoff, src, vl);
+vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t vd, uint64_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u64m1_tu(vd, rs1, vl);
}

-vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) {
- return __riscv_vmv_s_x_u64m2_tu(maskedoff, src, vl);
+vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t vd, uint64_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u64m2_tu(vd, rs1, vl);
}

-vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) {
- return __riscv_vmv_s_x_u64m4_tu(maskedoff, src, vl);
+vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t vd, uint64_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u64m4_tu(vd, rs1, vl);
}

-vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) {
- return __riscv_vmv_s_x_u64m8_tu(maskedoff, src, vl);
+vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t vd, uint64_t rs1, size_t vl) {
+ return __riscv_vmv_s_x_u64m8_tu(vd, rs1, vl);
}

/* { dg-final { scan-assembler-times {v[ml][s]*[ve][0-9]*\.[ivxfswum.]+\s+} 201 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vnclip.c b/auto-generated/policy_funcs/gnu-api-tests/vnclip.c
index 4a4b05ec3..5b9146642 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vnclip.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vnclip.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
typedef float float32_t;
typedef double float64_t;

-vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return __riscv_vnclip_wv_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t vd,
vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t 
vl) { + return __riscv_vnclip_wv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t vd, vint32m8_t 
vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_wv_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t 
test_vnclip_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_wx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_wv_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip_wv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_wx_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); 
+vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclip_wv_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclip_wv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_wx_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_wx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclip\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vnclipu.c b/auto-generated/policy_funcs/gnu-api-tests/vnclipu.c
index 39cc42b13..51b3d4d53 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vnclipu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vnclipu.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_wv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_wx_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_wx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu_wv_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1,
size_t vl) { + return __riscv_vnclipu_wv_u8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t 
test_vnclipu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) 
{ - return __riscv_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_wv_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclipu_wv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_wx_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_wx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclipu\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vncvt.c b/auto-generated/policy_funcs/gnu-api-tests/vncvt.c index c4afee1b6..90513dae3 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vncvt.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vncvt.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf8_tu(maskedoff, src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf8_tu(vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf4_tu(maskedoff, src, vl); +vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf4_tu(vd, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf2_tu(maskedoff, src, vl); +vint8mf2_t 
test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf2_tu(vd, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m1_tu(maskedoff, src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m1_tu(vd, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m2_tu(maskedoff, src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m2_tu(vd, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m4_tu(maskedoff, src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m4_tu(vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf8_tu(maskedoff, src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf8_tu(vd, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf4_tu(maskedoff, src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf2_tu(maskedoff, src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf2_tu(vd, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m1_tu(maskedoff, src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m1_tu(vd, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m2_tu(maskedoff, src, vl); +vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m2_tu(vd, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m4_tu(maskedoff, src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m4_tu(vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf4_tu(maskedoff, src, vl); +vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf2_tu(maskedoff, src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf2_tu(vd, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m1_tu(maskedoff, src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m1_tu(vd, vs2, vl); } -vint16m2_t 
test_vncvt_x_x_w_i16m2_tu(vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m2_tu(maskedoff, src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m2_tu(vd, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m4_tu(maskedoff, src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m4_tu(vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf4_tu(maskedoff, src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf2_tu(maskedoff, src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m4_tu(maskedoff, src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m4_tu(vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32mf2_tu(maskedoff, src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m1_tu(maskedoff, src, vl); +vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m2_tu(maskedoff, src, vl); +vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m4_tu(maskedoff, src, vl); +vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m4_tu(vd, vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t src, size_t 
vl) { - return __riscv_vncvt_x_x_w_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m4_tu(vd, vs2, vl); } -vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf8_tum(mask, maskedoff, src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf8_tum(vm, vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf4_tum(mask, maskedoff, src, vl); +vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf4_tum(vm, vd, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf2_tum(mask, maskedoff, src, vl); +vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf2_tum(vm, vd, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m1_tum(mask, maskedoff, src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m1_tum(vm, vd, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m2_tum(mask, maskedoff, src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m2_tum(vm, vd, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m4_tum(mask, maskedoff, src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m4_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf8_tum(mask, maskedoff, src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf4_tum(mask, maskedoff, src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf2_tum(mask, 
maskedoff, src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m1_tum(mask, maskedoff, src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m2_tum(mask, maskedoff, src, vl); +vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m4_tum(mask, maskedoff, src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m4_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf4_tum(mask, maskedoff, src, vl); +vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf2_tum(mask, maskedoff, src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m1_tum(mask, maskedoff, src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m2_tum(mask, maskedoff, src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m4_tum(mask, maskedoff, src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m4_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf4_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf2_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t mask, 
vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m1_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m2_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m4_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m4_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32mf2_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m1_tum(mask, maskedoff, src, vl); +vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m2_tum(mask, maskedoff, src, vl); +vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m4_tum(mask, maskedoff, src, vl); +vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m4_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32mf2_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m1_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m2_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m4_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vncvt_x_x_w_u32m4_tum(vm, vd, vs2, vl); } -vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf8_tumu(mask, maskedoff, src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf8_tumu(vm, vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf4_tumu(mask, maskedoff, src, vl); +vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf4_tumu(vm, vd, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf2_tumu(mask, maskedoff, src, vl); +vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf2_tumu(vm, vd, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m1_tumu(mask, maskedoff, src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m1_tumu(vm, vd, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m2_tumu(mask, maskedoff, src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m2_tumu(vm, vd, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m4_tumu(mask, maskedoff, src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m4_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf8_tumu(mask, maskedoff, src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf4_tumu(mask, maskedoff, src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf2_tumu(mask, maskedoff, src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m1_tumu(mask, maskedoff, src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m2_tumu(mask, maskedoff, src, vl); +vuint8m2_t 
test_vncvt_x_x_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m4_tumu(mask, maskedoff, src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m4_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf4_tumu(mask, maskedoff, src, vl); +vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf2_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m1_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m2_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m4_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m4_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf4_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf2_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m1_tumu(mask, maskedoff, src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m2_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t 
test_vncvt_x_x_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m4_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m4_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32mf2_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m1_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m2_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32m4_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32m4_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32mf2_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m1_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m2_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u32m4_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u32m4_tumu(vm, vd, vs2, vl); } -vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf8_mu(mask, maskedoff, src, vl); +vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf8_mu(vm, vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf4_mu(mask, maskedoff, src, vl); +vint8mf4_t 
test_vncvt_x_x_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf4_mu(vm, vd, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8mf2_mu(mask, maskedoff, src, vl); +vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8mf2_mu(vm, vd, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m1_mu(mask, maskedoff, src, vl); +vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m1_mu(vm, vd, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m2_mu(mask, maskedoff, src, vl); +vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m2_mu(vm, vd, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i8m4_mu(mask, maskedoff, src, vl); +vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i8m4_mu(vm, vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf8_mu(mask, maskedoff, src, vl); +vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf4_mu(mask, maskedoff, src, vl); +vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8mf2_mu(mask, maskedoff, src, vl); +vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m1_mu(mask, maskedoff, src, vl); +vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m2_mu(mask, maskedoff, src, vl); +vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u8m4_mu(mask, maskedoff, src, vl); +vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u8m4_mu(vm, vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf4_mu(mask, maskedoff, src, vl); +vint16mf4_t 
test_vncvt_x_x_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16mf2_mu(mask, maskedoff, src, vl); +vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m1_mu(mask, maskedoff, src, vl); +vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m2_mu(mask, maskedoff, src, vl); +vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i16m4_mu(mask, maskedoff, src, vl); +vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i16m4_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf4_mu(mask, maskedoff, src, vl); +vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16mf2_mu(mask, maskedoff, src, vl); +vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m1_mu(mask, maskedoff, src, vl); +vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m2_mu(mask, maskedoff, src, vl); +vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vncvt_x_x_w_u16m4_mu(mask, maskedoff, src, vl); +vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_u16m4_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vncvt_x_x_w_i32mf2_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vncvt_x_x_w_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) { - 
return __riscv_vncvt_x_x_w_i32m1_mu(mask, maskedoff, src, vl);
+vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) {
+ return __riscv_vncvt_x_x_w_i32m1_mu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
- return __riscv_vncvt_x_x_w_i32m2_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) {
+ return __riscv_vncvt_x_x_w_i32m2_mu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
- return __riscv_vncvt_x_x_w_i32m4_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) {
+ return __riscv_vncvt_x_x_w_i32m4_mu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
- return __riscv_vncvt_x_x_w_u32mf2_mu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+ return __riscv_vncvt_x_x_w_u32mf2_mu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
- return __riscv_vncvt_x_x_w_u32m1_mu(mask, maskedoff, src, vl);
+vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) {
+ return __riscv_vncvt_x_x_w_u32m1_mu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
- return __riscv_vncvt_x_x_w_u32m2_mu(mask, maskedoff, src, vl);
+vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) {
+ return __riscv_vncvt_x_x_w_u32m2_mu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
- return __riscv_vncvt_x_x_w_u32m4_mu(mask, maskedoff, src, vl);
+vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) {
+ return __riscv_vncvt_x_x_w_u32m4_mu(vm, vd, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vncvt\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vneg.c b/auto-generated/policy_funcs/gnu-api-tests/vneg.c
index 344c91438..282359321 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vneg.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vneg.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf8_tu(maskedoff, op1, vl);
+vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf8_tu(vd, vs, vl);
 }
-vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf4_tu(maskedoff, op1, vl);
+vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf4_tu(vd, vs, vl);
 }
-vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf2_tu(maskedoff, op1, vl);
+vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf2_tu(vd, vs, vl);
 }
-vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return __riscv_vneg_v_i8m1_tu(maskedoff, op1, vl);
+vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m1_tu(vd, vs, vl);
 }
-vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return __riscv_vneg_v_i8m2_tu(maskedoff, op1, vl);
+vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m2_tu(vd, vs, vl);
 }
-vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return __riscv_vneg_v_i8m4_tu(maskedoff, op1, vl);
+vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m4_tu(vd, vs, vl);
 }
-vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return __riscv_vneg_v_i8m8_tu(maskedoff, op1, vl);
+vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m8_tu(vd, vs, vl);
 }
-vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf4_tu(maskedoff, op1, vl);
+vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf4_tu(vd, vs, vl);
 }
-vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf2_tu(maskedoff, op1, vl);
+vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf2_tu(vd, vs, vl);
 }
-vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return __riscv_vneg_v_i16m1_tu(maskedoff, op1, vl);
+vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m1_tu(vd, vs, vl);
 }
-vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return __riscv_vneg_v_i16m2_tu(maskedoff, op1, vl);
+vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m2_tu(vd, vs, vl);
 }
-vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return __riscv_vneg_v_i16m4_tu(maskedoff, op1, vl);
+vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m4_tu(vd, vs, vl);
 }
-vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return __riscv_vneg_v_i16m8_tu(maskedoff, op1, vl);
+vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m8_tu(vd, vs, vl);
 }
-vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i32mf2_tu(maskedoff, op1, vl);
+vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32mf2_tu(vd, vs, vl);
 }
-vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return __riscv_vneg_v_i32m1_tu(maskedoff, op1, vl);
+vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m1_tu(vd, vs, vl);
 }
-vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return __riscv_vneg_v_i32m2_tu(maskedoff, op1, vl);
+vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m2_tu(vd, vs, vl);
 }
-vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return __riscv_vneg_v_i32m4_tu(maskedoff, op1, vl);
+vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m4_tu(vd, vs, vl);
 }
-vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return __riscv_vneg_v_i32m8_tu(maskedoff, op1, vl);
+vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m8_tu(vd, vs, vl);
 }
-vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return __riscv_vneg_v_i64m1_tu(maskedoff, op1, vl);
+vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m1_tu(vd, vs, vl);
 }
-vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return __riscv_vneg_v_i64m2_tu(maskedoff, op1, vl);
+vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m2_tu(vd, vs, vl);
 }
-vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return __riscv_vneg_v_i64m4_tu(maskedoff, op1, vl);
+vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m4_tu(vd, vs, vl);
 }
-vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return __riscv_vneg_v_i64m8_tu(maskedoff, op1, vl);
+vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m8_tu(vd, vs, vl);
 }
-vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf8_tum(mask, maskedoff, op1, vl);
+vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf8_tum(vm, vd, vs, vl);
 }
-vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf4_tum(mask, maskedoff, op1, vl);
+vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf4_tum(vm, vd, vs, vl);
 }
-vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf2_tum(mask, maskedoff, op1, vl);
+vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf2_tum(vm, vd, vs, vl);
 }
-vint8m1_t test_vneg_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return __riscv_vneg_v_i8m1_tum(mask, maskedoff, op1, vl);
+vint8m1_t test_vneg_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m1_tum(vm, vd, vs, vl);
 }
-vint8m2_t test_vneg_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return __riscv_vneg_v_i8m2_tum(mask, maskedoff, op1, vl);
+vint8m2_t test_vneg_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m2_tum(vm, vd, vs, vl);
 }
-vint8m4_t test_vneg_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return __riscv_vneg_v_i8m4_tum(mask, maskedoff, op1, vl);
+vint8m4_t test_vneg_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m4_tum(vm, vd, vs, vl);
 }
-vint8m8_t test_vneg_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return __riscv_vneg_v_i8m8_tum(mask, maskedoff, op1, vl);
+vint8m8_t test_vneg_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m8_tum(vm, vd, vs, vl);
 }
-vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf4_tum(mask, maskedoff, op1, vl);
+vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf4_tum(vm, vd, vs, vl);
 }
-vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf2_tum(mask, maskedoff, op1, vl);
+vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf2_tum(vm, vd, vs, vl);
 }
-vint16m1_t test_vneg_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return __riscv_vneg_v_i16m1_tum(mask, maskedoff, op1, vl);
+vint16m1_t test_vneg_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m1_tum(vm, vd, vs, vl);
 }
-vint16m2_t test_vneg_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return __riscv_vneg_v_i16m2_tum(mask, maskedoff, op1, vl);
+vint16m2_t test_vneg_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m2_tum(vm, vd, vs, vl);
 }
-vint16m4_t test_vneg_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return __riscv_vneg_v_i16m4_tum(mask, maskedoff, op1, vl);
+vint16m4_t test_vneg_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m4_tum(vm, vd, vs, vl);
 }
-vint16m8_t test_vneg_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return __riscv_vneg_v_i16m8_tum(mask, maskedoff, op1, vl);
+vint16m8_t test_vneg_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m8_tum(vm, vd, vs, vl);
 }
-vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i32mf2_tum(mask, maskedoff, op1, vl);
+vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32mf2_tum(vm, vd, vs, vl);
 }
-vint32m1_t test_vneg_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return __riscv_vneg_v_i32m1_tum(mask, maskedoff, op1, vl);
+vint32m1_t test_vneg_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m1_tum(vm, vd, vs, vl);
 }
-vint32m2_t test_vneg_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return __riscv_vneg_v_i32m2_tum(mask, maskedoff, op1, vl);
+vint32m2_t test_vneg_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m2_tum(vm, vd, vs, vl);
 }
-vint32m4_t test_vneg_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return __riscv_vneg_v_i32m4_tum(mask, maskedoff, op1, vl);
+vint32m4_t test_vneg_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m4_tum(vm, vd, vs, vl);
 }
-vint32m8_t test_vneg_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return __riscv_vneg_v_i32m8_tum(mask, maskedoff, op1, vl);
+vint32m8_t test_vneg_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m8_tum(vm, vd, vs, vl);
 }
-vint64m1_t test_vneg_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return __riscv_vneg_v_i64m1_tum(mask, maskedoff, op1, vl);
+vint64m1_t test_vneg_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m1_tum(vm, vd, vs, vl);
 }
-vint64m2_t test_vneg_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return __riscv_vneg_v_i64m2_tum(mask, maskedoff, op1, vl);
+vint64m2_t test_vneg_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m2_tum(vm, vd, vs, vl);
 }
-vint64m4_t test_vneg_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return __riscv_vneg_v_i64m4_tum(mask, maskedoff, op1, vl);
+vint64m4_t test_vneg_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m4_tum(vm, vd, vs, vl);
 }
-vint64m8_t test_vneg_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return __riscv_vneg_v_i64m8_tum(mask, maskedoff, op1, vl);
+vint64m8_t test_vneg_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m8_tum(vm, vd, vs, vl);
 }
-vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf8_tumu(mask, maskedoff, op1, vl);
+vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf8_tumu(vm, vd, vs, vl);
 }
-vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf4_tumu(mask, maskedoff, op1, vl);
+vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf4_tumu(vm, vd, vs, vl);
 }
-vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf2_tumu(mask, maskedoff, op1, vl);
+vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf2_tumu(vm, vd, vs, vl);
 }
-vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return __riscv_vneg_v_i8m1_tumu(mask, maskedoff, op1, vl);
+vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m1_tumu(vm, vd, vs, vl);
 }
-vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return __riscv_vneg_v_i8m2_tumu(mask, maskedoff, op1, vl);
+vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m2_tumu(vm, vd, vs, vl);
 }
-vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return __riscv_vneg_v_i8m4_tumu(mask, maskedoff, op1, vl);
+vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m4_tumu(vm, vd, vs, vl);
 }
-vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return __riscv_vneg_v_i8m8_tumu(mask, maskedoff, op1, vl);
+vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m8_tumu(vm, vd, vs, vl);
 }
-vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf4_tumu(mask, maskedoff, op1, vl);
+vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf4_tumu(vm, vd, vs, vl);
 }
-vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf2_tumu(mask, maskedoff, op1, vl);
+vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf2_tumu(vm, vd, vs, vl);
 }
-vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return __riscv_vneg_v_i16m1_tumu(mask, maskedoff, op1, vl);
+vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m1_tumu(vm, vd, vs, vl);
 }
-vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return __riscv_vneg_v_i16m2_tumu(mask, maskedoff, op1, vl);
+vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m2_tumu(vm, vd, vs, vl);
 }
-vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return __riscv_vneg_v_i16m4_tumu(mask, maskedoff, op1, vl);
+vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m4_tumu(vm, vd, vs, vl);
 }
-vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return __riscv_vneg_v_i16m8_tumu(mask, maskedoff, op1, vl);
+vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m8_tumu(vm, vd, vs, vl);
 }
-vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i32mf2_tumu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32mf2_tumu(vm, vd, vs, vl);
 }
-vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return __riscv_vneg_v_i32m1_tumu(mask, maskedoff, op1, vl);
+vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m1_tumu(vm, vd, vs, vl);
 }
-vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return __riscv_vneg_v_i32m2_tumu(mask, maskedoff, op1, vl);
+vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m2_tumu(vm, vd, vs, vl);
 }
-vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return __riscv_vneg_v_i32m4_tumu(mask, maskedoff, op1, vl);
+vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m4_tumu(vm, vd, vs, vl);
 }
-vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return __riscv_vneg_v_i32m8_tumu(mask, maskedoff, op1, vl);
+vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m8_tumu(vm, vd, vs, vl);
 }
-vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return __riscv_vneg_v_i64m1_tumu(mask, maskedoff, op1, vl);
+vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m1_tumu(vm, vd, vs, vl);
 }
-vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return __riscv_vneg_v_i64m2_tumu(mask, maskedoff, op1, vl);
+vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m2_tumu(vm, vd, vs, vl);
 }
-vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return __riscv_vneg_v_i64m4_tumu(mask, maskedoff, op1, vl);
+vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m4_tumu(vm, vd, vs, vl);
 }
-vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return __riscv_vneg_v_i64m8_tumu(mask, maskedoff, op1, vl);
+vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m8_tumu(vm, vd, vs, vl);
 }
-vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf8_mu(mask, maskedoff, op1, vl);
+vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf8_mu(vm, vd, vs, vl);
 }
-vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf4_mu(mask, maskedoff, op1, vl);
+vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf4_mu(vm, vd, vs, vl);
 }
-vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i8mf2_mu(mask, maskedoff, op1, vl);
+vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8mf2_mu(vm, vd, vs, vl);
 }
-vint8m1_t test_vneg_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return __riscv_vneg_v_i8m1_mu(mask, maskedoff, op1, vl);
+vint8m1_t test_vneg_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m1_mu(vm, vd, vs, vl);
 }
-vint8m2_t test_vneg_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return __riscv_vneg_v_i8m2_mu(mask, maskedoff, op1, vl);
+vint8m2_t test_vneg_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m2_mu(vm, vd, vs, vl);
 }
-vint8m4_t test_vneg_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return __riscv_vneg_v_i8m4_mu(mask, maskedoff, op1, vl);
+vint8m4_t test_vneg_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m4_mu(vm, vd, vs, vl);
 }
-vint8m8_t test_vneg_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return __riscv_vneg_v_i8m8_mu(mask, maskedoff, op1, vl);
+vint8m8_t test_vneg_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i8m8_mu(vm, vd, vs, vl);
 }
-vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf4_mu(mask, maskedoff, op1, vl);
+vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf4_mu(vm, vd, vs, vl);
 }
-vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i16mf2_mu(mask, maskedoff, op1, vl);
+vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16mf2_mu(vm, vd, vs, vl);
 }
-vint16m1_t test_vneg_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return __riscv_vneg_v_i16m1_mu(mask, maskedoff, op1, vl);
+vint16m1_t test_vneg_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m1_mu(vm, vd, vs, vl);
 }
-vint16m2_t test_vneg_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return __riscv_vneg_v_i16m2_mu(mask, maskedoff, op1, vl);
+vint16m2_t test_vneg_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m2_mu(vm, vd, vs, vl);
 }
-vint16m4_t test_vneg_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return __riscv_vneg_v_i16m4_mu(mask, maskedoff, op1, vl);
+vint16m4_t test_vneg_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m4_mu(vm, vd, vs, vl);
 }
-vint16m8_t test_vneg_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return __riscv_vneg_v_i16m8_mu(mask, maskedoff, op1, vl);
+vint16m8_t test_vneg_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i16m8_mu(vm, vd, vs, vl);
 }
-vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return __riscv_vneg_v_i32mf2_mu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32mf2_mu(vm, vd, vs, vl);
 }
-vint32m1_t test_vneg_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return __riscv_vneg_v_i32m1_mu(mask, maskedoff, op1, vl);
+vint32m1_t test_vneg_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m1_mu(vm, vd, vs, vl);
 }
-vint32m2_t test_vneg_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return __riscv_vneg_v_i32m2_mu(mask, maskedoff, op1, vl);
+vint32m2_t test_vneg_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m2_mu(vm, vd, vs, vl);
 }
-vint32m4_t test_vneg_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return __riscv_vneg_v_i32m4_mu(mask, maskedoff, op1, vl);
+vint32m4_t test_vneg_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m4_mu(vm, vd, vs, vl);
 }
-vint32m8_t test_vneg_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return __riscv_vneg_v_i32m8_mu(mask, maskedoff, op1, vl);
+vint32m8_t test_vneg_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i32m8_mu(vm, vd, vs, vl);
 }
-vint64m1_t test_vneg_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return __riscv_vneg_v_i64m1_mu(mask, maskedoff, op1, vl);
+vint64m1_t test_vneg_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m1_mu(vm, vd, vs, vl);
 }
-vint64m2_t test_vneg_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return __riscv_vneg_v_i64m2_mu(mask, maskedoff, op1, vl);
+vint64m2_t test_vneg_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m2_mu(vm, vd, vs, vl);
 }
-vint64m4_t test_vneg_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return __riscv_vneg_v_i64m4_mu(mask, maskedoff, op1, vl);
+vint64m4_t test_vneg_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m4_mu(vm, vd, vs, vl);
 }
-vint64m8_t test_vneg_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return __riscv_vneg_v_i64m8_mu(mask, maskedoff, op1, vl);
+vint64m8_t test_vneg_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) {
+ return __riscv_vneg_v_i64m8_mu(vm, vd, vs, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vneg\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vnmsac.c b/auto-generated/policy_funcs/gnu-api-tests/vnmsac.c
index ef5846e07..fd5ebbdf1 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vnmsac.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vnmsac.c
@@ -358,1060 +358,1060 @@ vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs
 return __riscv_vnmsac_vx_u64m8_tu(vd, rs1, vs2, vl);
 }
-vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return __riscv_vnmsac_vv_i8mf8_tum(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vnmsac_vv_i8mf8_tum(vm, vd, vs1, vs2, vl);
 }
-vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return __riscv_vnmsac_vx_i8mf8_tum(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vnmsac_vx_i8mf8_tum(vm, vd, rs1, vs2, vl);
 }
-vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return __riscv_vnmsac_vv_i8mf4_tum(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vnmsac_vv_i8mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return __riscv_vnmsac_vx_i8mf4_tum(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vnmsac_vx_i8mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return __riscv_vnmsac_vv_i8mf2_tum(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+ return __riscv_vnmsac_vv_i8mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
- return __riscv_vnmsac_vx_i8mf2_tum(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+ return __riscv_vnmsac_vx_i8mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
- return __riscv_vnmsac_vv_i8m1_tum(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+ return __riscv_vnmsac_vv_i8m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
- return __riscv_vnmsac_vx_i8m1_tum(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+ return __riscv_vnmsac_vx_i8m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t mask,
vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m2_tum(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m2_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m2_tum(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m2_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m4_tum(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m4_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m4_tum(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m4_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m8_tum(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m8_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m8_tum(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m1_tum(mask, vd, vs1, vs2, vl); +vint16m1_t 
test_vnmsac_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m1_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m2_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m2_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m4_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m4_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m8_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m8_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m1_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t 
vl) { + return __riscv_vnmsac_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m1_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m2_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m2_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m4_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m4_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m8_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m8_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m1_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m1_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m2_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t 
test_vnmsac_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m2_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m4_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m4_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m8_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m8_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf8_tum(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf8_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf8_tum(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf4_tum(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf4_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf4_tum(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf4_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf2_tum(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf2_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, 
vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf2_tum(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf2_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m1_tum(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m1_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m1_tum(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m1_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m2_tum(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m2_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m2_tum(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m2_tum(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m4_tum(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m4_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m4_tum(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m4_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m8_tum(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m8_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m8_tum(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m8_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16mf4_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); +vuint16mf4_t 
test_vnmsac_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16mf4_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16mf2_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16mf2_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m1_tum(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m1_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m1_tum(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m1_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m2_tum(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m2_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m2_tum(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m2_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m4_tum(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m4_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m4_tum(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m4_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m8_tum(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m8_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m8_tum(mask, vd, rs1, vs2, vl); +vuint16m8_t 
test_vnmsac_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m8_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32mf2_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32mf2_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m1_tum(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m1_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m1_tum(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m1_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m2_tum(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m2_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m2_tum(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m2_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m4_tum(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m4_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m4_tum(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m4_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m8_tum(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m8_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m8_tum(mask, vd, rs1, vs2, vl); +vuint32m8_t 
test_vnmsac_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m8_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m1_tum(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m1_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m1_tum(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m1_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m2_tum(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m2_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m2_tum(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m2_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m4_tum(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m4_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m4_tum(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m4_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m8_tum(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m8_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m8_tum(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m8_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t 
vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m1_tumu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m1_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m1_tumu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m1_tumu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m2_tumu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m2_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m2_tumu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m2_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m4_tumu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m4_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m4_tumu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m4_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t 
test_vnmsac_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m8_tumu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m8_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m8_tumu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t mask, vint16m4_t 
vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - 
return __riscv_vnmsac_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t 
test_vnmsac_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m1_tumu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m1_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m1_tumu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m1_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m2_tumu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t vm, 
vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m2_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m2_tumu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m4_tumu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m4_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m4_tumu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m4_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m8_tumu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m8_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m8_tumu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m8_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t 
vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m1_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m1_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m2_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m4_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m4_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m8_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m8_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); +vuint32m1_t 
test_vnmsac_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m1_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m1_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m2_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m4_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m4_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m8_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m8_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m1_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); 
+vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf8_mu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf8_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf8_mu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf8_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf4_mu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf4_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf4_mu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf4_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8mf2_mu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, 
vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8mf2_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8mf2_mu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8mf2_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m1_mu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m1_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m1_mu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m1_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m2_mu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m2_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m2_mu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m2_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m4_mu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m4_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m4_mu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m4_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i8m8_mu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i8m8_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i8m8_mu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i8m8_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return 
__riscv_vnmsac_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m1_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m1_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m2_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m2_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m4_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m4_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i16m8_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i16m8_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t 
rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m1_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m1_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m2_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m2_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m4_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m4_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i32m8_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i32m8_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t 
test_vnmsac_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m1_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m1_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m2_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m2_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m4_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m4_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_i64m8_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_i64m8_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf8_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return 
__riscv_vnmsac_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf4_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf4_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8mf2_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8mf2_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m1_mu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m1_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m1_mu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m1_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m2_mu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m2_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m2_mu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m2_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m4_mu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u8m4_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m4_mu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m4_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u8m8_mu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { 
+ return __riscv_vnmsac_vv_u8m8_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u8m8_mu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u8m8_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m1_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m1_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m2_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m2_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m4_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m4_mu(vm, 
vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m4_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u16m8_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u16m8_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m1_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m1_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m2_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m2_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m4_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t 
test_vnmsac_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m4_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u32m8_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u32m8_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m1_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m1_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m2_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m2_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m4_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m4_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vv_u64m8_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, 
vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_vx_u64m8_mu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_vx_u64m8_mu(vm, vd, rs1, vs2, vl); }
 /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vnmsub.c b/auto-generated/policy_funcs/gnu-api-tests/vnmsub.c
index 86d43d06f..558f20d22 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vnmsub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vnmsub.c
@@ -358,1060 +358,1060 @@ vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs
 return __riscv_vnmsub_vx_u64m8_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i8mf8_tum(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i8mf8_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i8mf8_tum(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i8mf8_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i8mf4_tum(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i8mf4_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i8mf4_tum(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i8mf4_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i8mf2_tum(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i8mf2_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i8mf2_tum(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i8mf2_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i8m1_tum(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i8m1_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i8m1_tum(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i8m1_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t
vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i8m2_tum(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i8m2_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i8m2_tum(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i8m2_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i8m4_tum(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i8m4_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i8m4_tum(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i8m4_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i8m8_tum(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i8m8_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i8m8_tum(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i8m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16m1_tum(mask, vd, vs1, vs2, vl); +vint16m1_t 
+vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m1_tum(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m2_tum(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m2_tum(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m4_tum(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m4_tum(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m8_tum(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m8_tum(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32mf2_tum(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32mf2_tum(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m1_tum(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m1_tum(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m2_tum(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m2_tum(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m4_tum(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m4_tum(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m8_tum(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m8_tum(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m1_tum(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m1_tum(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m2_tum(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m2_tum(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m2_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m4_tum(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m4_tum(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m4_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m8_tum(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m8_tum(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf8_tum(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf8_tum(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf4_tum(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf4_tum(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf2_tum(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf2_tum(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m1_tum(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m1_tum(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m2_tum(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m2_tum(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m4_tum(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m4_tum(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m8_tum(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m8_tum(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16mf4_tum(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16mf4_tum(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16mf2_tum(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16mf2_tum(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m1_tum(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m1_tum(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m2_tum(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m2_tum(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m4_tum(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m4_tum(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m8_tum(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m8_tum(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32mf2_tum(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32mf2_tum(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m1_tum(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m1_tum(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m2_tum(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m2_tum(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m4_tum(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m4_tum(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m8_tum(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m8_tum(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m1_tum(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m1_tum(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m2_tum(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m2_tum(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m4_tum(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m4_tum(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m8_tum(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m8_tum(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf8_tumu(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf8_tumu(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf4_tumu(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf4_tumu(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf2_tumu(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf2_tumu(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m1_tumu(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m1_tumu(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m2_tumu(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m2_tumu(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m4_tumu(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m4_tumu(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m8_tumu(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m8_tumu(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m1_tumu(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m1_tumu(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m2_tumu(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m2_tumu(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m4_tumu(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m4_tumu(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16m8_tumu(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i16m8_tumu(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m1_tumu(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m1_tumu(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m2_tumu(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m2_tumu(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m4_tumu(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m4_tumu(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i32m8_tumu(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i32m8_tumu(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m1_tumu(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m1_tumu(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m2_tumu(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m2_tumu(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m4_tumu(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m4_tumu(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i64m8_tumu(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i64m8_tumu(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i64m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf8_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf8_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf4_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf4_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8mf2_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8mf2_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m1_tumu(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m1_tumu(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m2_tumu(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m2_tumu(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m4_tumu(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m4_tumu(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u8m8_tumu(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u8m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u8m8_tumu(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u8m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m1_tumu(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m1_tumu(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m2_tumu(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m2_tumu(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m4_tumu(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m4_tumu(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u16m8_tumu(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u16m8_tumu(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m1_tumu(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m1_tumu(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m2_tumu(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m2_tumu(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m4_tumu(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m4_tumu(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u32m8_tumu(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u32m8_tumu(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m1_tumu(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m1_tumu(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m2_tumu(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m2_tumu(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m4_tumu(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m4_tumu(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_u64m8_tumu(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_u64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_u64m8_tumu(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_u64m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf8_mu(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf8_mu(vm, vd, vs1, vs2, vl);
 }
-vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf8_mu(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf8_mu(vm, vd, rs1, vs2, vl);
 }
-vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf4_mu(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf4_mu(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf4_mu(vm, vd, rs1, vs2, vl);
 }
-vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8mf2_mu(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8mf2_mu(vm, vd, vs1, vs2, vl);
 }
-vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8mf2_mu(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8mf2_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m1_mu(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m1_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m1_mu(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m1_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m2_mu(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m2_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m2_mu(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m2_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m4_mu(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m4_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m4_mu(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m4_mu(vm, vd, rs1, vs2, vl);
 }
-vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i8m8_mu(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i8m8_mu(vm, vd, vs1, vs2, vl);
 }
-vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_vx_i8m8_mu(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_vx_i8m8_mu(vm, vd, rs1, vs2, vl);
 }
-vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_vv_i16mf4_mu(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_vv_i16mf4_mu(vm, vd, vs1, vs2, vl);
 }
-vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
__riscv_vnmsub_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16m1_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i16m1_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16m2_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i16m2_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16m4_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i16m4_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i16m8_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i16m8_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t 
rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i32m1_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i32m1_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i32m2_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i32m2_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i32m4_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i32m4_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i32m8_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i32m8_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t 
test_vnmsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i64m1_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i64m1_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i64m2_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i64m2_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i64m4_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i64m4_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_i64m8_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_i64m8_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u8mf8_mu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u8mf8_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u8mf8_mu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u8mf8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return 
__riscv_vnmsub_vv_u8mf4_mu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u8mf4_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u8mf4_mu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u8mf4_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u8mf2_mu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u8mf2_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u8mf2_mu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u8mf2_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u8m1_mu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u8m1_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u8m1_mu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u8m1_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u8m2_mu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u8m2_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u8m2_mu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u8m2_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u8m4_mu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u8m4_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u8m4_mu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u8m4_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u8m8_mu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { 
+ return __riscv_vnmsub_vv_u8m8_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u8m8_mu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u8m8_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u16m1_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u16m1_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u16m2_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u16m2_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u16m4_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u16m4_mu(vm, 
vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u16m4_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u16m8_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u16m8_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u32m1_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u32m1_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u32m2_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u32m2_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u32m4_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t 
test_vnmsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u32m4_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u32m8_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u32m8_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u64m1_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u64m1_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u64m2_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u64m2_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u64m4_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u64m4_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_vv_u64m8_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsub_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, 
vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_vx_u64m8_mu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsub_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vnot.c b/auto-generated/policy_funcs/gnu-api-tests/vnot.c index b853433bc..622aaf79f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vnot.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vnot.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_i8mf8_tu(maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_i8mf8_tu(vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_i8mf4_tu(maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_i8mf4_tu(vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_v_i8mf2_tu(maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_i8mf2_tu(vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_v_i8m1_tu(maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_v_i8m1_tu(vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_v_i8m2_tu(maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_v_i8m2_tu(vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_v_i8m4_tu(maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_v_i8m4_tu(vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_v_i8m8_tu(maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_v_i8m8_tu(vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_i16mf4_tu(maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_i16mf4_tu(vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_i16mf2_tu(maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_v_i16mf2_tu(vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_v_i16m1_tu(maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vnot_v_i16m1_tu(vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_v_i16m2_tu(maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_v_i16m2_tu(vd, vs, 
vl); } -vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vnot_v_i16m4_tu(maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_v_i16m4_tu(vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_v_i16m8_tu(maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_v_i16m8_tu(vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_i32mf2_tu(maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_i32mf2_tu(vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_v_i32m1_tu(maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_v_i32m1_tu(vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_v_i32m2_tu(maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_v_i32m2_tu(vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_v_i32m4_tu(maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_v_i32m4_tu(vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_v_i32m8_tu(maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_v_i32m8_tu(vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_v_i64m1_tu(maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_v_i64m1_tu(vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_v_i64m2_tu(maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_v_i64m2_tu(vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_v_i64m4_tu(maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vnot_v_i64m4_tu(vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_v_i64m8_tu(maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_v_i64m8_tu(vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_u8mf8_tu(maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_u8mf8_tu(vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_u8mf4_tu(maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_u8mf4_tu(vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return 
__riscv_vnot_v_u8mf2_tu(maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_u8mf2_tu(vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_v_u8m1_tu(maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_v_u8m1_tu(vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_v_u8m2_tu(maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_v_u8m2_tu(vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_v_u8m4_tu(maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_v_u8m4_tu(vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_v_u8m8_tu(maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_v_u8m8_tu(vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_u16mf4_tu(maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_u16mf4_tu(vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_u16mf2_tu(maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot_v_u16mf2_tu(vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_v_u16m1_tu(maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_v_u16m1_tu(vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_v_u16m2_tu(maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return __riscv_vnot_v_u16m2_tu(vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_v_u16m4_tu(maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_v_u16m4_tu(vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_v_u16m8_tu(maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_v_u16m8_tu(vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_u32mf2_tu(maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_u32mf2_tu(vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_v_u32m1_tu(maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_v_u32m1_tu(vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_v_u32m2_tu(maskedoff, op1, vl); +vuint32m2_t 
test_vnot_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_v_u32m2_tu(vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_v_u32m4_tu(maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_v_u32m4_tu(vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_v_u32m8_tu(maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_v_u32m8_tu(vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_v_u64m1_tu(maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_v_u64m1_tu(vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_v_u64m2_tu(maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_v_u64m2_tu(vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_v_u64m4_tu(maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_v_u64m4_tu(vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_v_u64m8_tu(maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_v_u64m8_tu(vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_i8mf8_tum(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_i8mf8_tum(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_i8mf4_tum(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_i8mf4_tum(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_v_i8mf2_tum(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_i8mf2_tum(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_v_i8m1_tum(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_v_i8m1_tum(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_v_i8m2_tum(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_v_i8m2_tum(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_v_i8m4_tum(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_v_i8m4_tum(vm, vd, vs, vl); } -vint8m8_t 
test_vnot_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_v_i8m8_tum(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_v_i8m8_tum(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_i16mf4_tum(mask, maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_i16mf4_tum(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_i16mf2_tum(mask, maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_v_i16mf2_tum(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_v_i16m1_tum(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vnot_v_i16m1_tum(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_v_i16m2_tum(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_v_i16m2_tum(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vnot_v_i16m4_tum(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_v_i16m4_tum(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_v_i16m8_tum(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_v_i16m8_tum(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_i32mf2_tum(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_i32mf2_tum(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_v_i32m1_tum(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_v_i32m1_tum(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_v_i32m2_tum(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_v_i32m2_tum(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_v_i32m4_tum(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_v_i32m4_tum(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_v_i32m8_tum(mask, maskedoff, op1, vl); +vint32m8_t 
test_vnot_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_v_i32m8_tum(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_v_i64m1_tum(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_v_i64m1_tum(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_v_i64m2_tum(mask, maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_v_i64m2_tum(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_v_i64m4_tum(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vnot_v_i64m4_tum(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_v_i64m8_tum(mask, maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_v_i64m8_tum(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_u8mf8_tum(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_u8mf8_tum(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_u8mf4_tum(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_u8mf4_tum(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot_v_u8mf2_tum(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_u8mf2_tum(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_v_u8m1_tum(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_v_u8m1_tum(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_v_u8m2_tum(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_v_u8m2_tum(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_v_u8m4_tum(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_v_u8m4_tum(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_v_u8m8_tum(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_v_u8m8_tum(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t 
maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_u16mf4_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_u16mf4_tum(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_u16mf2_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot_v_u16mf2_tum(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_v_u16m1_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_v_u16m1_tum(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_v_u16m2_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return __riscv_vnot_v_u16m2_tum(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_v_u16m4_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_v_u16m4_tum(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_v_u16m8_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_v_u16m8_tum(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_u32mf2_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_u32mf2_tum(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_v_u32m1_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_v_u32m1_tum(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_v_u32m2_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_v_u32m2_tum(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_v_u32m4_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_v_u32m4_tum(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_v_u32m8_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_v_u32m8_tum(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_v_u64m1_tum(mask, maskedoff, op1, vl); 
+vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_v_u64m1_tum(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_v_u64m2_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_v_u64m2_tum(vm, vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_v_u64m4_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_v_u64m4_tum(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_v_u64m8_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_v_u64m8_tum(vm, vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_i8mf8_tumu(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_i8mf8_tumu(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_i8mf4_tumu(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_i8mf4_tumu(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_v_i8mf2_tumu(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_i8mf2_tumu(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_v_i8m1_tumu(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_v_i8m1_tumu(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_v_i8m2_tumu(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_v_i8m2_tumu(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_v_i8m4_tumu(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_v_i8m4_tumu(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_v_i8m8_tumu(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_v_i8m8_tumu(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_i16mf4_tumu(mask, maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_i16mf4_tumu(vm, vd, vs, vl); } -vint16mf2_t 
test_vnot_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_i16mf2_tumu(mask, maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_v_i16mf2_tumu(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_v_i16m1_tumu(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vnot_v_i16m1_tumu(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_v_i16m2_tumu(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_v_i16m2_tumu(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vnot_v_i16m4_tumu(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_v_i16m4_tumu(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_v_i16m8_tumu(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_v_i16m8_tumu(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_i32mf2_tumu(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_i32mf2_tumu(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_v_i32m1_tumu(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_v_i32m1_tumu(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_v_i32m2_tumu(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_v_i32m2_tumu(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_v_i32m4_tumu(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_v_i32m4_tumu(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_v_i32m8_tumu(mask, maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_v_i32m8_tumu(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_v_i64m1_tumu(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_v_i64m1_tumu(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_v_i64m2_tumu(mask, 
maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_v_i64m2_tumu(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_v_i64m4_tumu(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vnot_v_i64m4_tumu(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_v_i64m8_tumu(mask, maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_v_i64m8_tumu(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_u8mf8_tumu(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_u8mf8_tumu(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_u8mf4_tumu(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_u8mf4_tumu(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot_v_u8mf2_tumu(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_u8mf2_tumu(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_v_u8m1_tumu(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_v_u8m1_tumu(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_v_u8m2_tumu(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_v_u8m2_tumu(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_v_u8m4_tumu(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_v_u8m4_tumu(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_v_u8m8_tumu(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_v_u8m8_tumu(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_u16mf4_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_u16mf4_tumu(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_u16mf2_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return 
__riscv_vnot_v_u16mf2_tumu(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_v_u16m1_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_v_u16m1_tumu(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_v_u16m2_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return __riscv_vnot_v_u16m2_tumu(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_v_u16m4_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_v_u16m4_tumu(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_v_u16m8_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_v_u16m8_tumu(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_u32mf2_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_u32mf2_tumu(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_v_u32m1_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_v_u32m1_tumu(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_v_u32m2_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_v_u32m2_tumu(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_v_u32m4_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_v_u32m4_tumu(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_v_u32m8_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_v_u32m8_tumu(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_v_u64m1_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_v_u64m1_tumu(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_v_u64m2_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_v_u64m2_tumu(vm, vd, vs, vl); } -vuint64m4_t 
test_vnot_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_v_u64m4_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_v_u64m4_tumu(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_v_u64m8_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_v_u64m8_tumu(vm, vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_i8mf8_mu(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_i8mf8_mu(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_i8mf4_mu(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_i8mf4_mu(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_v_i8mf2_mu(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_i8mf2_mu(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_v_i8m1_mu(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_v_i8m1_mu(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_v_i8m2_mu(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_v_i8m2_mu(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_v_i8m4_mu(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_v_i8m4_mu(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_v_i8m8_mu(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_v_i8m8_mu(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_i16mf4_mu(mask, maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_i16mf4_mu(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_i16mf2_mu(mask, maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_v_i16mf2_mu(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_v_i16m1_mu(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) 
{ + return __riscv_vnot_v_i16m1_mu(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_v_i16m2_mu(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_v_i16m2_mu(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vnot_v_i16m4_mu(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_v_i16m4_mu(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_v_i16m8_mu(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_v_i16m8_mu(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_i32mf2_mu(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_i32mf2_mu(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_v_i32m1_mu(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_v_i32m1_mu(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_v_i32m2_mu(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_v_i32m2_mu(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_v_i32m4_mu(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_v_i32m4_mu(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_v_i32m8_mu(mask, maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_v_i32m8_mu(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_v_i64m1_mu(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_v_i64m1_mu(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_v_i64m2_mu(mask, maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_v_i64m2_mu(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_v_i64m4_mu(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vnot_v_i64m4_mu(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_v_i64m8_mu(mask, maskedoff, op1, vl); +vint64m8_t 
test_vnot_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_v_i64m8_mu(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_v_u8mf8_mu(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_v_u8mf8_mu(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_v_u8mf4_mu(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_v_u8mf4_mu(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot_v_u8mf2_mu(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_v_u8mf2_mu(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_v_u8m1_mu(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_v_u8m1_mu(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_v_u8m2_mu(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_v_u8m2_mu(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_v_u8m4_mu(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_v_u8m4_mu(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_v_u8m8_mu(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_v_u8m8_mu(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_v_u16mf4_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_v_u16mf4_mu(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_v_u16mf2_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot_v_u16mf2_mu(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_v_u16m1_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_v_u16m1_mu(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_v_u16m2_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return __riscv_vnot_v_u16m2_mu(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_v_u16m4_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_v_u16m4_mu(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_v_u16m8_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_v_u16m8_mu(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_v_u32mf2_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_v_u32mf2_mu(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_v_u32m1_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_v_u32m1_mu(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_v_u32m2_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_v_u32m2_mu(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_v_u32m4_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_v_u32m4_mu(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_v_u32m8_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_v_u32m8_mu(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_v_u64m1_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_v_u64m1_mu(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_v_u64m2_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_v_u64m2_mu(vm, vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_v_u64m4_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_v_u64m4_mu(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_v_u64m8_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_v_u64m8_mu(vm, vd, vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnot\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vnsra.c 
b/auto-generated/policy_funcs/gnu-api-tests/vnsra.c index a1a29a254..d25c0a4c7 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vnsra.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vnsra.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf8_tu(maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf8_tu(maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf4_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf4_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf2_tu(maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf2_tu(maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m1_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m1_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m2_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m2_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return 
__riscv_vnsra_wv_i8m4_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m4_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m4_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf4_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf4_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf2_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf2_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m1_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m1_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m2_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m2_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m4_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m4_tu(maskedoff, op1, shift, vl); 
+vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m4_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32mf2_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32mf2_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m1_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m1_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m2_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m2_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m4_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m4_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m4_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf8_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf8_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return 
__riscv_vnsra_wv_i8mf4_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf4_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf2_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf2_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m1_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m1_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m2_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m2_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m4_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m4_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return 
__riscv_vnsra_wv_i16mf4_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf4_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf2_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf2_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m1_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m1_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m2_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m2_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m4_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m4_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t 
maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32mf2_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32mf2_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m1_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m1_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m2_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m2_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m4_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m4_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } 
-vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m1_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m1_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m2_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m2_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m4_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m4_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m4_tumu(vm, vd, 
vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m1_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m1_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m2_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m2_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m4_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m4_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t 
test_vnsra_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m1_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m1_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m2_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m2_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m4_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m4_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf8_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - 
return __riscv_vnsra_wx_i8mf8_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf4_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf4_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8mf2_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8mf2_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m1_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m1_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m2_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m2_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i8m4_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i8m4_mu(mask, maskedoff, op1, 
shift, vl); +vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf4_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf4_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16mf2_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16mf2_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m1_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m1_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m2_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i16m2_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i16m4_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return 
__riscv_vnsra_wx_i16m4_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32mf2_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32mf2_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m1_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m1_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m2_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m2_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_wv_i32m4_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_wv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_wx_i32m4_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_wx_i32m4_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsra\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vnsrl.c b/auto-generated/policy_funcs/gnu-api-tests/vnsrl.c index d2a26701b..b12aeb704 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vnsrl.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vnsrl.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float 
float32_t; typedef double float64_t; -vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf8_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf8_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf4_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf4_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf2_tu(maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf2_tu(maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m1_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m1_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m2_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m2_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m4_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t 
test_vnsrl_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m4_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m4_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf4_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf4_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf2_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf2_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m1_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m1_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m2_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m2_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m4_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m4_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m4_tu(vd, vs2, rs1, vl); } 
-vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32mf2_tu(maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32mf2_tu(maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m1_tu(maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m1_tu(maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m2_tu(maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m2_tu(maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m4_tu(maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m4_tu(maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m4_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf8_tum(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf8_tum(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf4_tum(mask, maskedoff, op1, shift, vl); +vuint8mf4_t 
test_vnsrl_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf4_tum(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf2_tum(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf2_tum(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m1_tum(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m1_tum(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m2_tum(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m2_tum(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m4_tum(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m4_tum(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf4_tum(mask, 
maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf4_tum(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf2_tum(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf2_tum(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m1_tum(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m1_tum(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m2_tum(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m2_tum(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m4_tum(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m4_tum(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t 
test_vnsrl_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32mf2_tum(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32mf2_tum(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m1_tum(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m1_tum(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m2_tum(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m2_tum(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m4_tum(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m4_tum(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t 
vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m1_tumu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m1_tumu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m2_tumu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m2_tumu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m4_tumu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m4_tumu(mask, maskedoff, 
op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m1_tumu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m1_tumu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m2_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m2_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m4_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m4_tumu(vm, vd, vs2, vs1, vl); } 
-vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m4_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m1_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m1_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m2_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m2_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m4_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m4_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf8_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t 
test_vnsrl_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf8_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf4_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf4_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8mf2_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8mf2_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m1_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m1_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m2_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m2_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u8m4_mu(mask, maskedoff, op1, shift, vl); 
+vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u8m4_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf4_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf4_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16mf2_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16mf2_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m1_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m1_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u16m2_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m2_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t 
vl) { - return __riscv_vnsrl_wv_u16m4_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u16m4_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32mf2_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32mf2_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m1_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m1_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m2_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m2_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsrl_wv_u32m4_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsrl_wv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_wx_u32m4_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_wx_u32m4_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsrl\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vor.c b/auto-generated/policy_funcs/gnu-api-tests/vor.c index 9d8c7d3af..20ba1596a 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vor.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vor.c @@ -6,1412 +6,1412 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vor_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return 
__riscv_vor_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t 
test_vor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vor_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t vd, 
vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vor_vx_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t 
test_vor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return 
__riscv_vor_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t 
vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) 
{ + return __riscv_vor_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vor_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vor_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return 
__riscv_vor_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m4_tum(mask, maskedoff, op1, op2, 
vl); +vint16m4_t test_vor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vor_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, 
vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m8_tum(vm, vd, vs2, 
rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t mask, 
vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t 
maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t mask, 
vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t 
maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, 
size_t vl) { - return __riscv_vor_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vor_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t 
test_vor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vor_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t 
vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, 
vint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) 
{ + return __riscv_vor_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } 
-vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { 
+ return __riscv_vor_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t 
vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t 
test_vor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vor_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } 
-vint8m2_t test_vor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vor_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return 
__riscv_vor_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vor_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_mu(vbool32_t vm, vint32m1_t 
vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t 
test_vor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t 
op2, size_t vl) { - return __riscv_vor_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t 
test_vor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t 
vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, 
vuint64m1_t vs1, size_t vl) { + return __riscv_vor_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vor\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredand.c b/auto-generated/policy_funcs/gnu-api-tests/vredand.c index 7759418f3..8e4607a16 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vredand.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vredand.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t 
test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t 
test_vredand_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m8_i64m1_tu(maskedoff, 
vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return 
__riscv_vredand_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t 
test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return 
__riscv_vredand_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t 
maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t 
test_vredand_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return 
__riscv_vredand_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, 
vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredand\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredmax.c b/auto-generated/policy_funcs/gnu-api-tests/vredmax.c index 247120592..40e496334 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vredmax.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vredmax.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return 
__riscv_vredmax_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t 
vs1, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return 
__riscv_vredmax_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - 
return __riscv_vredmax_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t 
maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmax\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredmaxu.c b/auto-generated/policy_funcs/gnu-api-tests/vredmaxu.c index f1a37b10b..12345e714 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vredmaxu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vredmaxu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); 
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t 
test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t 
scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u16m8_u16m1_tum(vm, vd, 
vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); 
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmaxu\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredmin.c b/auto-generated/policy_funcs/gnu-api-tests/vredmin.c index c454f2dd4..47aab0c85 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vredmin.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vredmin.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t 
maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t 
test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m4_i8m1_tum(mask, maskedoff, 
vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return 
__riscv_vredmin_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmin\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredminu.c b/auto-generated/policy_funcs/gnu-api-tests/vredminu.c index 6ddc4266b..5729fd065 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vredminu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vredminu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef 
float float32_t; typedef double float64_t; -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t vd, 
vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t 
scalar, size_t vl) { - return __riscv_vredminu_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return 
__riscv_vredminu_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return 
__riscv_vredminu_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredminu\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredor.c b/auto-generated/policy_funcs/gnu-api-tests/vredor.c index 54c86b5b3..0dce8bb6d 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vredor.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vredor.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, 
vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m2_i16m1_tu(maskedoff, vector, 
scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, 
vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t 
test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return 
__riscv_vredor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t 
vl) { + return __riscv_vredor_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, 
vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t 
vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t 
test_vredor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) 
{ - return __riscv_vredor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredor\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredsum.c b/auto-generated/policy_funcs/gnu-api-tests/vredsum.c index 65b160ec8..24aad2436 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vredsum.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vredsum.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl); +vint8m1_t 
+  return __riscv_vredsum_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredsum\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vredxor.c b/auto-generated/policy_funcs/gnu-api-tests/vredxor.c
index c99232285..13e295fb3 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vredxor.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vredxor.c
@@ -6,356 +6,356 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf8_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf4_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf2_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m1_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m2_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m4_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m8_i8m1_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16mf4_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16mf2_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m1_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m2_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m4_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m8_i16m1_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32mf2_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m1_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m2_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m4_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m8_i32m1_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m1_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m2_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m4_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m8_i64m1_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf8_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf4_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf2_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m1_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m2_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m4_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m8_u8m1_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16mf4_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16mf2_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m1_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m2_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m4_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m8_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32mf2_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m1_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m2_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m4_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m8_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m1_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m2_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m4_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m8_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf8_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf4_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8mf2_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m1_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m2_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m4_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i8m8_i8m1_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m1_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m2_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m4_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i16m8_i16m1_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m1_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m2_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m4_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i32m8_i32m1_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m1_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m2_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m4_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_i64m8_i64m1_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf8_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf4_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8mf2_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m1_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m2_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m4_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u8m8_u8m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m1_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m2_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m4_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u16m8_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m1_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m2_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m4_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u32m8_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m1_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m2_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m4_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_vs_u64m8_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredxor\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vrem.c b/auto-generated/policy_funcs/gnu-api-tests/vrem.c
index 628b002b6..6e62d7976 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vrem.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vrem.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return
__riscv_vrem_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vrem_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vrem_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vrem_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vrem_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vrem_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrem_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t 
maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vrem_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vrem_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vrem_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vrem_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t 
test_vrem_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vrem_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vrem_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vrem_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vrem_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vrem_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vrem_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t 
test_vrem_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vrem_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vrem_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t 
vl) { + return __riscv_vrem_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vrem_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vrem_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vrem_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vrem_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vrem_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrem_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t 
maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vrem_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vrem_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vrem_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vrem_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, 
vint32mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vrem_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vrem_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vrem_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vrem_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - 
return __riscv_vrem_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vrem_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vrem_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vrem_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf4_tumu(mask, 
maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vrem_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vrem_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vrem_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vrem_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t 
vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vrem_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrem_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vrem_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vrem_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vrem_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t vm, 
vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vrem_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vrem_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vrem_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vrem_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t 
test_vrem_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vrem_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vrem_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vrem_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vrem_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vrem_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t 
test_vrem_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrem_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vrem_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrem_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vrem_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrem_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vrem_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return 
__riscv_vrem_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m2_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m4_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_vx_i8m8_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vrem_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vrem_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vrem\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vremu.c b/auto-generated/policy_funcs/gnu-api-tests/vremu.c
index fca50a43e..25f5b2c42 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vremu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vremu.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf2_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m1_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m1_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m2_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m2_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m4_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m4_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m8_tu(vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m8_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m1_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m1_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m2_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m2_tu(vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m4_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m4_tu(vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m8_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m8_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32mf2_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m1_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m1_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m2_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m2_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m4_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m4_tu(vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m8_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m8_tu(vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m1_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m1_tu(vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m2_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m2_tu(vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m4_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m4_tu(vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m8_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vremu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vremu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vremu_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vremu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vremu_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vremu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vremu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vremu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vremu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vremu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vremu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vremu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vremu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vremu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vremu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vremu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vrgather.c b/auto-generated/policy_funcs/gnu-api-tests/vrgather.c index b9ab7388c..217e95710 
100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vrgather.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vrgather.c @@ -6,1892 +6,1892 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf4_tu(maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16mf4_tu(maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf2_tu(maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16mf2_tu(maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_f16m1_tu(maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m1_tu(maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_f16m2_tu(maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m2_tu(maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_f16m4_tu(maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m4_tu(maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t 
vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_f16m8_tu(maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m8_tu(maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m8_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_f32mf2_tu(maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32mf2_tu(maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_f32m1_tu(maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m1_tu(maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_f32m2_tu(maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m2_tu(maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_f32m4_tu(maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m4_tu(maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m8_t 
test_vrgather_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_f32m8_tu(maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m8_tu(maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m8_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_f64m1_tu(maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m1_tu(maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_f64m2_tu(maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m2_tu(maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_f64m4_tu(maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m4_tu(maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_f64m8_tu(maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m8_tu(maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf8_tu(maskedoff, op1, index, vl); 
+vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf8_tu(maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf4_tu(maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf4_tu(maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf2_tu(maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf2_tu(maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf2_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_i8m1_tu(maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m1_tu(maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m1_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_i8m2_tu(maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m2_tu(maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m2_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_i8m4_tu(maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m4_tu(maskedoff, op1, index, vl); +vint8m4_t 
test_vrgather_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m4_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_i8m8_tu(maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m8_tu(maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m8_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_i16mf4_tu(maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf4_tu(maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i16mf2_tu(maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf2_tu(maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf2_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_i16m1_tu(maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m1_tu(maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m1_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_i16m2_tu(maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m2_tu(maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m2_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return 
__riscv_vrgather_vv_i16m4_tu(maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m4_tu(maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m4_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_i16m8_tu(maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m8_tu(maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m8_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i32mf2_tu(maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32mf2_tu(maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32mf2_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_i32m1_tu(maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m1_tu(maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m1_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_i32m2_tu(maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m2_tu(maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m2_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_i32m4_tu(maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t 
test_vrgather_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m4_tu(maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m4_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_i32m8_tu(maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m8_tu(maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m8_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_i64m1_tu(maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m1_tu(maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m1_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_i64m2_tu(maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m2_tu(maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m2_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_i64m4_tu(maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m4_tu(maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m4_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_i64m8_tu(maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m8_tu(maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { + return 
__riscv_vrgather_vx_i64m8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf8_tu(maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf8_tu(maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf4_tu(maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf4_tu(maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf2_tu(maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf2_tu(maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_u8m1_tu(maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m1_tu(maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_u8m2_tu(maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m2_tu(maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_u8m4_tu(maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t 
vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m4_tu(maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_u8m8_tu(maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m8_tu(maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m8_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf4_tu(maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf4_tu(maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf2_tu(maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf2_tu(maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_u16m1_tu(maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m1_tu(maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_u16m2_tu(maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { 
- return __riscv_vrgather_vx_u16m2_tu(maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_u16m4_tu(maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m4_tu(maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_u16m8_tu(maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m8_tu(maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m8_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u32mf2_tu(maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32mf2_tu(maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_u32m1_tu(maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m1_tu(maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_u32m2_tu(maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m2_tu(maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { + return 
__riscv_vrgather_vx_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_u32m4_tu(maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m4_tu(maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_u32m8_tu(maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m8_tu(maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m8_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_u64m1_tu(maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m1_tu(maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_u64m2_tu(maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m2_tu(maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_u64m4_tu(maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m4_tu(maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_u64m8_tu(maskedoff, op1, index, 
vl); +vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m8_tu(maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m8_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf4_tum(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16mf4_tum(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf2_tum(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16mf2_tum(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_f16m1_tum(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m1_tum(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_f16m2_tum(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m2_tum(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { + return 
__riscv_vrgather_vx_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_f16m4_tum(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m4_tum(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_f16m8_tum(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m8_tum(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_f32mf2_tum(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32mf2_tum(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_f32m1_tum(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m1_tum(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_f32m2_tum(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t 
test_vrgather_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m2_tum(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_f32m4_tum(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m4_tum(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_f32m8_tum(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m8_tum(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_f64m1_tum(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m1_tum(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_f64m2_tum(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m2_tum(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - 
return __riscv_vrgather_vv_f64m4_tum(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m4_tum(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m8_tum(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m8_tum(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf8_tum(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf8_tum(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf4_tum(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf4_tum(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf2_tum(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf2_tum(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m1_tum(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m1_tum(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m2_tum(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m2_tum(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m4_tum(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m4_tum(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m8_tum(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m8_tum(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16mf4_tum(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16mf4_tum(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16mf2_tum(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16mf2_tum(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m1_tum(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m1_tum(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m2_tum(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m2_tum(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m4_tum(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m4_tum(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m8_tum(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m8_tum(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32mf2_tum(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32mf2_tum(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m1_tum(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m1_tum(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m2_tum(mask, maskedoff, op1, index, vl);
+vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m2_tum(mask, maskedoff, op1, index, vl);
+vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m4_tum(mask, maskedoff, op1, index, vl);
+vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m4_tum(mask, maskedoff, op1, index, vl);
+vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m8_tum(mask, maskedoff, op1, index, vl);
+vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m8_tum(mask, maskedoff, op1, index, vl);
+vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m1_tum(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m1_tum(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m2_tum(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m2_tum(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m4_tum(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m4_tum(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m8_tum(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m8_tum(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8mf8_tum(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8mf8_tum(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8mf4_tum(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8mf4_tum(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8mf2_tum(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8mf2_tum(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m1_tum(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m1_tum(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m2_tum(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m2_tum(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m4_tum(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m4_tum(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m8_tum(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m8_tum(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u16mf4_tum(mask, maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u16mf4_tum(mask, maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u16mf2_tum(mask, maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u16mf2_tum(mask, maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_u16m1_tum(mask, maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u16m1_tum(mask, maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u16m2_tum(mask, maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u16m2_tum(mask, maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u16m4_tum(mask, maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u16m4_tum(mask, maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u16m8_tum(mask, maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u16m8_tum(mask, maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u32mf2_tum(mask, maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u32mf2_tum(mask, maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_u32m1_tum(mask, maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u32m1_tum(mask, maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u32m2_tum(mask, maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u32m2_tum(mask, maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u32m4_tum(mask, maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u32m4_tum(mask, maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u32m8_tum(mask, maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u32m8_tum(mask, maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_u64m1_tum(mask, maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m1_tum(mask, maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u64m2_tum(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m2_tum(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u64m4_tum(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m4_tum(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u64m8_tum(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m8_tum(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16mf4_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16mf4_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16mf2_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16mf2_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m1_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m1_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m2_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m2_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m4_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m4_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_f16m8_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f16m8_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32mf2_tumu(mask, maskedoff, op1, index, vl);
+vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32mf2_tumu(mask, maskedoff, op1, index, vl);
+vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m1_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m1_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m2_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m2_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m4_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m4_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_f32m8_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f32m8_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m1_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m1_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m2_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m2_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m4_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m4_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_f64m8_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_f64m8_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf8_tumu(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf8_tumu(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf4_tumu(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf4_tumu(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8mf2_tumu(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8mf2_tumu(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m1_tumu(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m1_tumu(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m2_tumu(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m2_tumu(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m4_tumu(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m4_tumu(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i8m8_tumu(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i8m8_tumu(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16mf4_tumu(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16mf4_tumu(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16mf2_tumu(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16mf2_tumu(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m1_tumu(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m1_tumu(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m2_tumu(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m2_tumu(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m4_tumu(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m4_tumu(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i16m8_tumu(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i16m8_tumu(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32mf2_tumu(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32mf2_tumu(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m1_tumu(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m1_tumu(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m2_tumu(mask, maskedoff, op1, index, vl);
+vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m2_tumu(mask, maskedoff, op1, index, vl);
+vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m4_tumu(mask, maskedoff, op1, index, vl);
+vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m4_tumu(mask, maskedoff, op1, index, vl);
+vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i32m8_tumu(mask, maskedoff, op1, index, vl);
+vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i32m8_tumu(mask, maskedoff, op1, index, vl);
+vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m1_tumu(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m1_tumu(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m2_tumu(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m2_tumu(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m4_tumu(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m4_tumu(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_i64m8_tumu(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_i64m8_tumu(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8mf8_tumu(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8mf8_tumu(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8mf4_tumu(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8mf4_tumu(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8mf2_tumu(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8mf2_tumu(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m1_tumu(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m1_tumu(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m2_tumu(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m2_tumu(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m4_tumu(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m4_tumu(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u8m8_tumu(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u8m8_tumu(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t
vs1, size_t vl) { + return __riscv_vrgather_vx_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf4_tumu(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf4_tumu(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf2_tumu(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf2_tumu(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_u16m1_tumu(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m1_tumu(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_u16m2_tumu(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m2_tumu(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_u16m4_tumu(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } 
-vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m4_tumu(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_u16m8_tumu(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m8_tumu(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u32mf2_tumu(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32mf2_tumu(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_u32m1_tumu(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m1_tumu(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_u32m2_tumu(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m2_tumu(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, 
size_t vl) { - return __riscv_vrgather_vv_u32m4_tumu(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m4_tumu(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_u32m8_tumu(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m8_tumu(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_u64m1_tumu(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m1_tumu(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_u64m2_tumu(mask, maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m2_tumu(mask, maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_u64m4_tumu(mask, maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m4_tumu(mask, maskedoff, op1, index, vl); +vuint64m4_t 
test_vrgather_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_u64m8_tumu(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m8_tumu(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf4_mu(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16mf4_mu(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_f16mf2_mu(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16mf2_mu(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_f16m1_mu(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m1_mu(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_f16m2_mu(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t 
vl) { + return __riscv_vrgather_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m2_mu(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_f16m4_mu(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m4_mu(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_f16m8_mu(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m8_mu(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_f32mf2_mu(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32mf2_mu(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_f32m1_mu(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m1_mu(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t mask, 
vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_f32m2_mu(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m2_mu(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_f32m4_mu(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m4_mu(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_f32m8_mu(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m8_mu(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_f64m1_mu(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m1_mu(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_f64m2_mu(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m2_mu(mask, maskedoff, op1, index, vl); 
+vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_f64m4_mu(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m4_mu(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_f64m8_mu(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m8_mu(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_f64m8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf8_mu(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf8_mu(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf4_mu(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf4_mu(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i8mf2_mu(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t 
test_vrgather_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8mf2_mu(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_i8m1_mu(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m1_mu(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_i8m2_mu(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m2_mu(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_i8m4_mu(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m4_mu(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_i8m8_mu(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m8_mu(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_i16mf4_mu(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf4_mu(vm, vd, vs2, 
vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf4_mu(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i16mf2_mu(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16mf2_mu(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_i16m1_mu(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m1_mu(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_i16m2_mu(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m2_mu(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_i16m4_mu(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m4_mu(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_i16m8_mu(mask, maskedoff, op1, index, vl); +vint16m8_t 
test_vrgather_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m8_mu(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_i32mf2_mu(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32mf2_mu(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_i32m1_mu(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m1_mu(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_i32m2_mu(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m2_mu(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_i32m4_mu(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m4_mu(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, 
vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_i32m8_mu(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m8_mu(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_i64m1_mu(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m1_mu(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_i64m2_mu(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m2_mu(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_i64m4_mu(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m4_mu(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_i64m8_mu(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m8_mu(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { + return 
__riscv_vrgather_vx_i64m8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf8_mu(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf8_mu(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf4_mu(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf4_mu(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u8mf2_mu(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8mf2_mu(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_u8m1_mu(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m1_mu(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_u8m2_mu(mask, maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m2_mu(mask, maskedoff, op1, 
index, vl); +vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_u8m4_mu(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m4_mu(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_u8m8_mu(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m8_mu(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf4_mu(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf4_mu(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u16mf2_mu(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16mf2_mu(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_u16m1_mu(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t 
test_vrgather_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m1_mu(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_u16m2_mu(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m2_mu(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_u16m4_mu(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m4_mu(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_u16m8_mu(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m8_mu(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_vv_u32mf2_mu(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32mf2_mu(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_u32m1_mu(mask, maskedoff, op1, index, vl); 
+vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m1_mu(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_u32m2_mu(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m2_mu(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_u32m4_mu(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m4_mu(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_u32m8_mu(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m8_mu(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_u64m1_mu(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m1_mu(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_vx_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t 
-  return __riscv_vrgather_vv_u64m2_mu(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m2_mu(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_vv_u64m4_mu(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m4_mu(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_vx_u64m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_vv_u64m8_mu(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_vx_u64m8_mu(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) {
  return __riscv_vrgather_vx_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgather\.[ivxfswum.]+\s+} 472 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vrgatherei16.c b/auto-generated/policy_funcs/gnu-api-tests/vrgatherei16.c
index ad498ad14..337b0e224 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vrgatherei16.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vrgatherei16.c
@@ -6,916 +6,916 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf4_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf4_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf2_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m1_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m1_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m2_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m4_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m4_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m8_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m8_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32mf2_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32mf2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m1_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m1_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m2_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m2_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m4_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m4_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m8_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m8_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f64m1_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f64m1_tu(vd, vs2, vs1,
vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m2_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m4_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m8_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, 
vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m1_tu(maskedoff, op1, 
op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, 
vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t 
vl) { + return __riscv_vrgatherei16_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m8_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16mf4_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m1_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m2_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m4_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t mask, 
vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f16m8_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32mf2_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m1_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m2_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m4_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m8_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m1_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m2_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m4_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } 
-vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f64m8_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8m1_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8m2_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8m4_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f64m1_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f64m2_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f64m4_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f64m8_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m2_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m4_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f16m8_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32mf2_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_vv_f32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_vv_f32m1_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t
vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m2_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m4_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f32m8_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m1_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m2_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m4_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_f64m8_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, 
vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t 
test_vrgatherei16_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, 
vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return 
__riscv_vrgatherei16_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m1_mu(mask, 
maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgatherei16\.[ivxfswum.]+\s+} 228 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vrsub.c b/auto-generated/policy_funcs/gnu-api-tests/vrsub.c index 7bf66103c..fbc7b947e 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vrsub.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vrsub.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m4_tu(maskedoff, 
op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m4_tu(vd, vs2, 
rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m8_tu(maskedoff, op1, 
op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t 
test_vrsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, 
int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t 
vl) { - return __riscv_vrsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vrsub_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, 
uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t 
maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t 
op2, size_t vl) { - return __riscv_vrsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t 
op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t 
test_vrsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, 
size_t vl) { + return __riscv_vrsub_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t 
test_vrsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, 
vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vrsub_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, 
size_t vl) {
-  return __riscv_vrsub_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub_vx_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_vx_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_vx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_vx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vrsub\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsadd.c b/auto-generated/policy_funcs/gnu-api-tests/vsadd.c
index dc11f7053..095e3e777 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsadd.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsadd.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsadd_vv_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsadd_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd_vx_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsadd_vv_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsadd_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd_vx_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, 
vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_tu(maskedoff, op1, op2, 
vl); +vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return 
__riscv_vsadd_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t 
test_vsadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return 
__riscv_vsadd_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, 
size_t vl) { + return __riscv_vsadd_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, 
vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, 
vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return 
__riscv_vsadd_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, 
vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t 
test_vsadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_tumu(mask, maskedoff, 
op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t 
test_vsadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return 
__riscv_vsadd_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32mf2_mu(vm, vd, 
vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t 
test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsadd_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsadd_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsadd_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsadd_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsadd\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsaddu.c b/auto-generated/policy_funcs/gnu-api-tests/vsaddu.c
index 7a9383aca..3985fdecd 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsaddu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsaddu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsaddu_vv_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsaddu_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsaddu_vx_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return
__riscv_vsaddu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t 
test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m4_tu(vd, 
vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return 
__riscv_vsaddu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t 
test_vsaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_tum(mask, 
maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t 
test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t vm, 
vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return 
__riscv_vsaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } 
-vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) 
{ + return __riscv_vsaddu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vsaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return 
__riscv_vsaddu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf8_mu(mask, 
maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return 
__riscv_vsaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, 
vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vsaddu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t vm, 
vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsaddu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsbc.c b/auto-generated/policy_funcs/gnu-api-tests/vsbc.c index 2c329a316..097b78fa5 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vsbc.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vsbc.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8mf8_tu(maskedoff, op1, op2, borrowin, vl); +vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8mf8_tu(vd, vs2, vs1, v0, vl); } -vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8mf8_tu(maskedoff, op1, op2, borrowin, vl); +vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8mf8_tu(vd, vs2, rs1, v0, vl); } -vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8mf4_tu(maskedoff, op1, op2, borrowin, vl); +vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8mf4_tu(vd, vs2, vs1, v0, vl); } -vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8mf4_tu(maskedoff, op1, op2, borrowin, vl); +vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8mf4_tu(vd, vs2, rs1, v0, vl); } -vint8mf2_t 
test_vsbc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8mf2_tu(maskedoff, op1, op2, borrowin, vl); +vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8mf2_tu(vd, vs2, vs1, v0, vl); } -vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8mf2_tu(maskedoff, op1, op2, borrowin, vl); +vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8mf2_tu(vd, vs2, rs1, v0, vl); } -vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m1_tu(maskedoff, op1, op2, borrowin, vl); +vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m1_tu(vd, vs2, vs1, v0, vl); } -vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m1_tu(maskedoff, op1, op2, borrowin, vl); +vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8m1_tu(vd, vs2, rs1, v0, vl); } -vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m2_tu(maskedoff, op1, op2, borrowin, vl); +vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m2_tu(vd, vs2, vs1, v0, vl); } -vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m2_tu(maskedoff, op1, op2, borrowin, vl); +vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8m2_tu(vd, vs2, rs1, v0, vl); } -vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m4_tu(maskedoff, op1, op2, borrowin, vl); +vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m4_tu(vd, vs2, vs1, v0, vl); } -vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m4_tu(maskedoff, op1, op2, borrowin, vl); +vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8m4_tu(vd, vs2, rs1, v0, vl); } -vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i8m8_tu(maskedoff, op1, op2, borrowin, vl); +vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vvm_i8m8_tu(vd, vs2, vs1, v0, vl); } -vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i8m8_tu(maskedoff, op1, op2, borrowin, vl); +vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vxm_i8m8_tu(vd, vs2, rs1, v0, vl); } -vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t 
op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16mf4_tu(maskedoff, op1, op2, borrowin, vl); +vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16mf4_tu(vd, vs2, vs1, v0, vl); } -vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16mf4_tu(maskedoff, op1, op2, borrowin, vl); +vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16mf4_tu(vd, vs2, rs1, v0, vl); } -vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16mf2_tu(maskedoff, op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16mf2_tu(vd, vs2, vs1, v0, vl); } -vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16mf2_tu(maskedoff, op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16mf2_tu(vd, vs2, rs1, v0, vl); } -vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m1_tu(maskedoff, op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m1_tu(vd, vs2, vs1, v0, vl); } -vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m1_tu(maskedoff, op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m1_tu(vd, vs2, rs1, v0, vl); } -vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m2_tu(maskedoff, op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m2_tu(vd, vs2, vs1, v0, vl); } -vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m2_tu(maskedoff, op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m2_tu(vd, vs2, rs1, v0, vl); } -vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m4_tu(maskedoff, op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m4_tu(vd, vs2, vs1, v0, vl); } -vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m4_tu(maskedoff, op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m4_tu(vd, vs2, rs1, v0, vl); } -vint16m8_t 
test_vsbc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i16m8_tu(maskedoff, op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vvm_i16m8_tu(vd, vs2, vs1, v0, vl); } -vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i16m8_tu(maskedoff, op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vxm_i16m8_tu(vd, vs2, rs1, v0, vl); } -vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32mf2_tu(maskedoff, op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32mf2_tu(vd, vs2, vs1, v0, vl); } -vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32mf2_tu(maskedoff, op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32mf2_tu(vd, vs2, rs1, v0, vl); } -vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m1_tu(maskedoff, op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m1_tu(vd, vs2, vs1, v0, vl); } -vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m1_tu(maskedoff, op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32m1_tu(vd, vs2, rs1, v0, vl); } -vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m2_tu(maskedoff, op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m2_tu(vd, vs2, vs1, v0, vl); } -vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m2_tu(maskedoff, op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32m2_tu(vd, vs2, rs1, v0, vl); } -vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m4_tu(maskedoff, op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m4_tu(vd, vs2, vs1, v0, vl); } -vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m4_tu(maskedoff, op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return 
__riscv_vsbc_vxm_i32m4_tu(vd, vs2, rs1, v0, vl); } -vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i32m8_tu(maskedoff, op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_i32m8_tu(vd, vs2, vs1, v0, vl); } -vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i32m8_tu(maskedoff, op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_i32m8_tu(vd, vs2, rs1, v0, vl); } -vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m1_tu(maskedoff, op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m1_tu(vd, vs2, vs1, v0, vl); } -vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m1_tu(maskedoff, op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m1_tu(vd, vs2, rs1, v0, vl); } -vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m2_tu(maskedoff, op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m2_tu(vd, vs2, vs1, v0, vl); } -vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m2_tu(maskedoff, op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m2_tu(vd, vs2, rs1, v0, vl); } -vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m4_tu(maskedoff, op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m4_tu(vd, vs2, vs1, v0, vl); } -vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m4_tu(maskedoff, op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m4_tu(vd, vs2, rs1, v0, vl); } -vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_i64m8_tu(maskedoff, op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_i64m8_tu(vd, vs2, vs1, v0, vl); } -vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_i64m8_tu(maskedoff, op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, 
vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_i64m8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8mf8_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8mf8_tu(vd, vs2, vs1, v0, vl); } -vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8mf8_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8mf8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8mf4_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8mf4_tu(vd, vs2, vs1, v0, vl); } -vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8mf4_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8mf4_tu(vd, vs2, rs1, v0, vl); } -vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8mf2_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8mf2_tu(vd, vs2, vs1, v0, vl); } -vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8mf2_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8mf2_tu(vd, vs2, rs1, v0, vl); } -vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m1_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m1_tu(vd, vs2, vs1, v0, vl); } -vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m1_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m1_tu(vd, vs2, rs1, v0, vl); } -vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m2_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m2_tu(vd, vs2, vs1, v0, vl); } -vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m2_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m2_t 
test_vsbc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m2_tu(vd, vs2, rs1, v0, vl); } -vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m4_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m4_tu(vd, vs2, vs1, v0, vl); } -vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m4_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m4_tu(vd, vs2, rs1, v0, vl); } -vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u8m8_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vvm_u8m8_tu(vd, vs2, vs1, v0, vl); } -vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u8m8_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_vxm_u8m8_tu(vd, vs2, rs1, v0, vl); } -vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16mf4_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16mf4_tu(vd, vs2, vs1, v0, vl); } -vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16mf4_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_vxm_u16mf4_tu(vd, vs2, rs1, v0, vl); } -vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16mf2_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16mf2_tu(vd, vs2, vs1, v0, vl); } -vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_vxm_u16mf2_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_vxm_u16mf2_tu(vd, vs2, rs1, v0, vl); } -vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_vvm_u16m1_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_vvm_u16m1_tu(vd, vs2, vs1, v0, vl); } -vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, 
size_t vl) {
-  return __riscv_vsbc_vxm_u16m1_tu(maskedoff, op1, op2, borrowin, vl);
+vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u16m1_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u16m2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u16m2_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u16m2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u16m2_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u16m4_tu(maskedoff, op1, op2, borrowin, vl);
+vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u16m4_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u16m4_tu(maskedoff, op1, op2, borrowin, vl);
+vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u16m4_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u16m8_tu(maskedoff, op1, op2, borrowin, vl);
+vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u16m8_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u16m8_tu(maskedoff, op1, op2, borrowin, vl);
+vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u16m8_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u32mf2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u32mf2_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u32mf2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u32mf2_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u32m1_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u32m1_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u32m1_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u32m1_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u32m2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u32m2_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u32m2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u32m2_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u32m4_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u32m4_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u32m4_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u32m4_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u32m8_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u32m8_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u32m8_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u32m8_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u64m1_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u64m1_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u64m1_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u64m1_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u64m2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u64m2_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u64m2_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u64m2_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u64m4_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u64m4_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u64m4_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u64m4_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_vvm_u64m8_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_vvm_u64m8_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_vxm_u64m8_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_vxm_u64m8_tu(vd, vs2, rs1, v0, vl);
 }

 /* { dg-final { scan-assembler-times {vsbc\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsext_vf2.c b/auto-generated/policy_funcs/gnu-api-tests/vsext_vf2.c
index 095c0a883..91330cf5a 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsext_vf2.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsext_vf2.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf4_tu(maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf4_tu(vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf2_tu(maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf2_tu(vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m1_tu(maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m1_tu(vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m2_tu(maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m2_tu(vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m4_tu(maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m4_tu(vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m8_tu(maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m8_tu(vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32mf2_tu(maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32mf2_tu(vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m1_tu(maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m1_tu(vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m2_tu(maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m2_tu(vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m4_tu(maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m4_tu(vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m8_tu(maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m8_tu(vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m1_tu(maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m1_tu(vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m2_tu(maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m2_tu(vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m4_tu(maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m4_tu(vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m8_tu(maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m8_tu(vd, vs2, vl);
 }

-vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf4_tum(mask, maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf4_tum(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf2_tum(mask, maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf2_tum(vm, vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m1_tum(mask, maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m1_tum(vm, vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m2_tum(mask, maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m2_tum(vm, vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m4_tum(mask, maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m4_tum(vm, vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m8_tum(mask, maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m8_tum(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32mf2_tum(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32mf2_tum(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m1_tum(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m1_tum(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m2_tum(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m2_tum(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m4_tum(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m4_tum(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m8_tum(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m8_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m1_tum(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m1_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m2_tum(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m2_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m4_tum(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m4_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m8_tum(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m8_tum(vm, vd, vs2, vl);
 }

-vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf4_tumu(mask, maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf4_tumu(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf2_tumu(mask, maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf2_tumu(vm, vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m1_tumu(mask, maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m1_tumu(vm, vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m2_tumu(mask, maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m2_tumu(vm, vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m4_tumu(mask, maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m4_tumu(vm, vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m8_tumu(mask, maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m8_tumu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32mf2_tumu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32mf2_tumu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m1_tumu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m1_tumu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m2_tumu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m2_tumu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m4_tumu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m4_tumu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m8_tumu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m8_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m1_tumu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m1_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m2_tumu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m2_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m4_tumu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m4_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m8_tumu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m8_tumu(vm, vd, vs2, vl);
 }

-vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf4_mu(mask, maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf4_mu(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16mf2_mu(mask, maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16mf2_mu(vm, vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m1_mu(mask, maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m1_mu(vm, vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m2_mu(mask, maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m2_mu(vm, vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m4_mu(mask, maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m4_mu(vm, vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i16m8_mu(mask, maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i16m8_mu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32mf2_mu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32mf2_mu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m1_mu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m1_mu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m2_mu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m2_mu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m4_mu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m4_mu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i32m8_mu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i32m8_mu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m1_mu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m1_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m2_mu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m2_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m4_mu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m4_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_i64m8_mu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_i64m8_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf2[ivxfswum.]*\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsext_vf4.c b/auto-generated/policy_funcs/gnu-api-tests/vsext_vf4.c
index 781ce8544..3546f4611 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsext_vf4.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsext_vf4.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32mf2_tu(maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32mf2_tu(vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m1_tu(maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m1_tu(vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m2_tu(maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m2_tu(vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m4_tu(maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m4_tu(vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m8_tu(maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m8_tu(vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m1_tu(maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m1_tu(vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m2_tu(maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m2_tu(vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m4_tu(maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m4_tu(vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m8_tu(maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m8_tu(vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32mf2_tum(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32mf2_tum(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m1_tum(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m1_tum(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m2_tum(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m2_tum(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m4_tum(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m4_tum(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m8_tum(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m8_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m1_tum(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m1_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m2_tum(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m2_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m4_tum(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m4_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m8_tum(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m8_tum(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32mf2_tumu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32mf2_tumu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m1_tumu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m1_tumu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m2_tumu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m2_tumu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m4_tumu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m4_tumu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m8_tumu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m8_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m1_tumu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m1_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m2_tumu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m2_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m4_tumu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m4_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m8_tumu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m8_tumu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32mf2_mu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32mf2_mu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m1_mu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m1_mu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m2_mu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m2_mu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m4_mu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m4_mu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i32m8_mu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i32m8_mu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m1_mu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m1_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m2_mu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m2_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m4_mu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m4_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_i64m8_mu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_i64m8_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf4[ivxfswum.]*\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsext_vf8.c b/auto-generated/policy_funcs/gnu-api-tests/vsext_vf8.c
index a93af2f0e..6b31441fc 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsext_vf8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsext_vf8.c
@@ -6,68 +6,68 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m1_tu(maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m1_tu(vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m2_tu(maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m2_tu(vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m4_tu(maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m4_tu(vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m8_tu(maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m8_tu(vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m1_tum(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m1_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m2_tum(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m2_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m4_tum(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m4_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m8_tum(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m8_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m1_tumu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m1_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m2_tumu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m2_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m4_tumu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m4_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m8_tumu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m8_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m1_mu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m1_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m2_mu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m2_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m4_mu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m4_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_i64m8_mu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_i64m8_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf8[ivxfswum.]*\s+} 16 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vslide1down.c b/auto-generated/policy_funcs/gnu-api-tests/vslide1down.c
index 7cc0e6bea..ae20ec764 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vslide1down.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vslide1down.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8mf8_tu(maskedoff, src, value, vl);
+vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8mf4_tu(maskedoff, src, value, vl);
+vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8mf2_tu(maskedoff, src, value, vl);
+vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m1_tu(maskedoff, src, value, vl);
+vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m1_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m2_tu(maskedoff, src, value, vl);
+vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m2_tu(vd, vs2, rs1, vl);
 }

-vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m4_tu(maskedoff, src, value, vl);
+vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m4_tu(vd, vs2, rs1, vl);
 }

-vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m8_tu(maskedoff, src, value, vl);
+vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m8_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16mf4_tu(maskedoff, src, value, vl);
+vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16mf4_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16mf2_tu(maskedoff, src, value, vl);
+vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16mf2_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m1_tu(maskedoff, src, value, vl);
+vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m1_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m2_tu(maskedoff, src, value, vl);
+vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m2_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m4_tu(maskedoff, src, value, vl);
+vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m4_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m8_tu(maskedoff, src, value, vl);
+vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m8_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32mf2_tu(maskedoff, src, value, vl);
+vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32mf2_tu(vd, vs2, rs1, vl);
 }

-vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m1_tu(maskedoff, src, value, vl);
+vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m1_tu(vd, vs2, rs1, vl);
 }

-vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m2_tu(maskedoff, src, value, vl);
+vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m2_tu(vd, vs2, rs1, vl);
 }

-vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m4_tu(maskedoff, src, value, vl);
+vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m4_tu(vd, vs2, rs1, vl);
 }

-vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m8_tu(maskedoff, src, value, vl);
+vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m8_tu(vd, vs2, rs1, vl);
 }

-vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m1_tu(maskedoff, src, value, vl);
+vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m1_tu(vd, vs2, rs1, vl);
 }

-vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m2_tu(maskedoff, src, value, vl);
+vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m2_tu(vd, vs2, rs1, vl);
 }

-vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m4_tu(maskedoff, src, value, vl);
+vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m4_tu(vd, vs2, rs1, vl);
 }

-vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m8_tu(maskedoff, src, value, vl);
+vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m8_tu(vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8mf8_tu(maskedoff, src, value, vl);
+vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8mf4_tu(maskedoff, src, value, vl);
+vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8mf2_tu(maskedoff, src, value, vl);
+vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m1_tu(maskedoff, src, value, vl);
+vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m1_tu(vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m2_tu(maskedoff, src, value, vl);
+vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m2_tu(vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m4_tu(maskedoff, src, value, vl);
+vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m4_tu(vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m8_tu(maskedoff, src, value, vl);
+vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m8_tu(vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16mf4_tu(maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16mf2_tu(maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m1_tu(maskedoff, src, value, vl);
+vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m1_tu(vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m2_tu(maskedoff, src, value, vl);
+vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m2_tu(vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m4_tu(maskedoff, src, value, vl);
+vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m4_tu(vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m8_tu(maskedoff, src, value, vl);
+vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m8_tu(vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u32mf2_tu(maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u32m1_tu(maskedoff, src, value, vl);
+vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u32m1_tu(vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u32m2_tu(maskedoff, src, value, vl);
+vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u32m2_tu(vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u32m4_tu(maskedoff, src, value, vl);
+vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u32m4_tu(vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u32m8_tu(maskedoff, src, value, vl);
+vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u32m8_tu(vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u64m1_tu(maskedoff, src, value, vl);
+vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u64m1_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u64m2_tu(maskedoff, src, value, vl);
+vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u64m2_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u64m4_tu(maskedoff, src, value, vl);
+vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u64m4_tu(vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u64m8_tu(maskedoff, src, value, vl);
+vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u64m8_tu(vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8mf8_tum(mask, maskedoff, src, value, vl);
+vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8mf8_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8mf4_tum(mask, maskedoff, src, value, vl);
+vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8mf2_tum(mask, maskedoff, src, value, vl);
+vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m1_tum(mask, maskedoff, src, value, vl);
+vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m2_tum(mask, maskedoff, src, value, vl);
+vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m4_tum(mask, maskedoff, src, value, vl);
+vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i8m8_tum(mask, maskedoff, src, value, vl);
+vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i8m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16mf4_tum(mask, maskedoff, src, value, vl);
+vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16mf2_tum(mask, maskedoff, src, value, vl);
+vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m1_tum(mask, maskedoff, src, value, vl);
+vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m2_tum(mask, maskedoff, src, value, vl);
+vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m4_tum(mask, maskedoff, src, value, vl);
+vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i16m8_tum(mask, maskedoff, src, value, vl);
+vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32mf2_tum(mask, maskedoff, src, value, vl);
+vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m1_tum(mask, maskedoff, src, value, vl);
+vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m2_tum(mask, maskedoff, src, value, vl);
+vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m4_tum(mask, maskedoff, src, value, vl);
+vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i32m8_tum(mask, maskedoff, src, value, vl);
+vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m1_tum(mask, maskedoff, src, value, vl);
+vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m2_tum(mask, maskedoff, src, value, vl);
+vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m4_tum(mask, maskedoff, src, value, vl);
+vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_vx_i64m8_tum(mask, maskedoff, src, value, vl);
+vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8mf8_tum(mask, maskedoff, src, value, vl);
+vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8mf4_tum(mask, maskedoff, src, value, vl);
+vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8mf2_tum(mask, maskedoff, src, value, vl);
+vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m1_tum(mask, maskedoff, src, value, vl);
+vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m2_tum(mask, maskedoff, src, value, vl);
+vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m4_tum(mask, maskedoff, src, value, vl);
+vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u8m8_tum(mask, maskedoff, src, value, vl);
+vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16mf4_tum(mask, maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16mf2_tum(mask, maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m1_tum(mask, maskedoff, src, value, vl);
+vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m2_tum(mask, maskedoff, src, value, vl);
+vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m4_tum(mask, maskedoff, src, value, vl);
+vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u16m8_tum(mask, maskedoff, src, value, vl);
+vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_vx_u32mf2_tum(mask, maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m1_tum(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m2_tum(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m4_tum(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m8_tum(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m1_tum(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m2_tum(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m4_tum(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m8_tum(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf8_tumu(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t 
maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf4_tumu(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf2_tumu(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m1_tumu(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m2_tumu(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m4_tumu(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m8_tumu(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf4_tumu(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf2_tumu(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m1_tumu(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m2_tumu(mask, maskedoff, src, value, vl); 
+vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m4_tumu(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m8_tumu(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32mf2_tumu(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m1_tumu(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m2_tumu(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m4_tumu(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m8_tumu(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m1_tumu(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m2_tumu(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t 
rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m4_tumu(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m8_tumu(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf8_tumu(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf4_tumu(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf2_tumu(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m1_tumu(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m2_tumu(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m4_tumu(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m8_tumu(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t 
test_vslide1down_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16mf4_tumu(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16mf2_tumu(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m1_tumu(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m2_tumu(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m4_tumu(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m8_tumu(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32mf2_tumu(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m1_tumu(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m2_tumu(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t 
test_vslide1down_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m4_tumu(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m8_tumu(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m1_tumu(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m2_tumu(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m4_tumu(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m8_tumu(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf8_mu(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf4_mu(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8mf2_mu(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t 
value, size_t vl) { - return __riscv_vslide1down_vx_i8m1_mu(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m2_mu(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m4_mu(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m8_mu(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf4_mu(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16mf2_mu(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m1_mu(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m2_mu(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m4_mu(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m8_mu(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { 
+ return __riscv_vslide1down_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32mf2_mu(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m1_mu(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m2_mu(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m4_mu(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m8_mu(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m1_mu(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m2_mu(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m4_mu(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m8_mu(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, 
uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf8_mu(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf4_mu(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8mf2_mu(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m1_mu(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m2_mu(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m4_mu(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m8_mu(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16mf4_mu(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16mf2_mu(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m1_mu(mask, maskedoff, src, value, vl); +vuint16m1_t 
test_vslide1down_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m2_mu(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m4_mu(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m8_mu(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32mf2_mu(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m1_mu(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m2_mu(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m4_mu(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m8_mu(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m1_mu(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t 
vl) { + return __riscv_vslide1down_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m2_mu(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m4_mu(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m8_mu(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1down\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vslide1up.c b/auto-generated/policy_funcs/gnu-api-tests/vslide1up.c index 6cb0137ca..75171878c 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vslide1up.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vslide1up.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf8_tu(maskedoff, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf4_tu(maskedoff, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8mf2_tu(maskedoff, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m1_tu(maskedoff, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m2_tu(maskedoff, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m4_tu(maskedoff, src, value, vl); +vint8m4_t 
test_vslide1up_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m8_tu(maskedoff, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16mf4_tu(maskedoff, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16mf2_tu(maskedoff, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m1_tu(maskedoff, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m2_tu(maskedoff, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m4_tu(maskedoff, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m8_tu(maskedoff, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32mf2_tu(maskedoff, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m1_tu(maskedoff, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m2_tu(maskedoff, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return 
__riscv_vslide1up_vx_i32m4_tu(maskedoff, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m8_tu(maskedoff, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m1_tu(maskedoff, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m2_tu(maskedoff, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m4_tu(maskedoff, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m8_tu(maskedoff, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf8_tu(maskedoff, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf4_tu(maskedoff, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8mf2_tu(maskedoff, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m1_tu(maskedoff, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m2_tu(maskedoff, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t 
maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m4_tu(maskedoff, src, value, vl);
+vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m4_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m8_tu(maskedoff, src, value, vl);
+vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m8_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf4_tu(maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf2_tu(maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m1_tu(maskedoff, src, value, vl);
+vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m1_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m2_tu(maskedoff, src, value, vl);
+vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m2_tu(vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m4_tu(maskedoff, src, value, vl);
+vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m4_tu(vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m8_tu(maskedoff, src, value, vl);
+vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m8_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32mf2_tu(maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m1_tu(maskedoff, src, value, vl);
+vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m1_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m2_tu(maskedoff, src, value, vl);
+vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m2_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m4_tu(maskedoff, src, value, vl);
+vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m4_tu(vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m8_tu(maskedoff, src, value, vl);
+vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m8_tu(vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m1_tu(maskedoff, src, value, vl);
+vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m1_tu(vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m2_tu(maskedoff, src, value, vl);
+vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m2_tu(vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m4_tu(maskedoff, src, value, vl);
+vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m4_tu(vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m8_tu(maskedoff, src, value, vl);
+vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m8_tu(vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf8_tum(mask, maskedoff, src, value, vl);
+vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf4_tum(mask, maskedoff, src, value, vl);
+vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf2_tum(mask, maskedoff, src, value, vl);
+vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m1_tum(mask, maskedoff, src, value, vl);
+vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m2_tum(mask, maskedoff, src, value, vl);
+vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m4_tum(mask, maskedoff, src, value, vl);
+vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m8_tum(mask, maskedoff, src, value, vl);
+vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16mf4_tum(mask, maskedoff, src, value, vl);
+vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16mf2_tum(mask, maskedoff, src, value, vl);
+vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m1_tum(mask, maskedoff, src, value, vl);
+vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m2_tum(mask, maskedoff, src, value, vl);
+vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m4_tum(mask, maskedoff, src, value, vl);
+vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m8_tum(mask, maskedoff, src, value, vl);
+vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32mf2_tum(mask, maskedoff, src, value, vl);
+vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m1_tum(mask, maskedoff, src, value, vl);
+vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m2_tum(mask, maskedoff, src, value, vl);
+vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m4_tum(mask, maskedoff, src, value, vl);
+vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m8_tum(mask, maskedoff, src, value, vl);
+vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m1_tum(mask, maskedoff, src, value, vl);
+vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m2_tum(mask, maskedoff, src, value, vl);
+vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m4_tum(mask, maskedoff, src, value, vl);
+vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m8_tum(mask, maskedoff, src, value, vl);
+vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf8_tum(mask, maskedoff, src, value, vl);
+vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf4_tum(mask, maskedoff, src, value, vl);
+vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf2_tum(mask, maskedoff, src, value, vl);
+vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m1_tum(mask, maskedoff, src, value, vl);
+vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m2_tum(mask, maskedoff, src, value, vl);
+vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m4_tum(mask, maskedoff, src, value, vl);
+vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m8_tum(mask, maskedoff, src, value, vl);
+vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf4_tum(mask, maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf2_tum(mask, maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m1_tum(mask, maskedoff, src, value, vl);
+vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m2_tum(mask, maskedoff, src, value, vl);
+vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m4_tum(mask, maskedoff, src, value, vl);
+vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m8_tum(mask, maskedoff, src, value, vl);
+vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32mf2_tum(mask, maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m1_tum(mask, maskedoff, src, value, vl);
+vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m2_tum(mask, maskedoff, src, value, vl);
+vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m4_tum(mask, maskedoff, src, value, vl);
+vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m8_tum(mask, maskedoff, src, value, vl);
+vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m1_tum(mask, maskedoff, src, value, vl);
+vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m2_tum(mask, maskedoff, src, value, vl);
+vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m4_tum(mask, maskedoff, src, value, vl);
+vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m8_tum(mask, maskedoff, src, value, vl);
+vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf8_tumu(mask, maskedoff, src, value, vl);
+vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf4_tumu(mask, maskedoff, src, value, vl);
+vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf2_tumu(mask, maskedoff, src, value, vl);
+vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m1_tumu(mask, maskedoff, src, value, vl);
+vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m2_tumu(mask, maskedoff, src, value, vl);
+vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m4_tumu(mask, maskedoff, src, value, vl);
+vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m8_tumu(mask, maskedoff, src, value, vl);
+vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16mf4_tumu(mask, maskedoff, src, value, vl);
+vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16mf2_tumu(mask, maskedoff, src, value, vl);
+vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m1_tumu(mask, maskedoff, src, value, vl);
+vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m2_tumu(mask, maskedoff, src, value, vl);
+vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m4_tumu(mask, maskedoff, src, value, vl);
+vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m8_tumu(mask, maskedoff, src, value, vl);
+vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32mf2_tumu(mask, maskedoff, src, value, vl);
+vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m1_tumu(mask, maskedoff, src, value, vl);
+vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m2_tumu(mask, maskedoff, src, value, vl);
+vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m4_tumu(mask, maskedoff, src, value, vl);
+vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m8_tumu(mask, maskedoff, src, value, vl);
+vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m1_tumu(mask, maskedoff, src, value, vl);
+vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m2_tumu(mask, maskedoff, src, value, vl);
+vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m4_tumu(mask, maskedoff, src, value, vl);
+vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m8_tumu(mask, maskedoff, src, value, vl);
+vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf8_tumu(mask, maskedoff, src, value, vl);
+vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf4_tumu(mask, maskedoff, src, value, vl);
+vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf2_tumu(mask, maskedoff, src, value, vl);
+vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m1_tumu(mask, maskedoff, src, value, vl);
+vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m2_tumu(mask, maskedoff, src, value, vl);
+vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m4_tumu(mask, maskedoff, src, value, vl);
+vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m8_tumu(mask, maskedoff, src, value, vl);
+vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf4_tumu(mask, maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf2_tumu(mask, maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m1_tumu(mask, maskedoff, src, value, vl);
+vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m2_tumu(mask, maskedoff, src, value, vl);
+vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m4_tumu(mask, maskedoff, src, value, vl);
+vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m8_tumu(mask, maskedoff, src, value, vl);
+vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32mf2_tumu(mask, maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m1_tumu(mask, maskedoff, src, value, vl);
+vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m2_tumu(mask, maskedoff, src, value, vl);
+vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m4_tumu(mask, maskedoff, src, value, vl);
+vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m8_tumu(mask, maskedoff, src, value, vl);
+vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m1_tumu(mask, maskedoff, src, value, vl);
+vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m2_tumu(mask, maskedoff, src, value, vl);
+vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m4_tumu(mask, maskedoff, src, value, vl);
+vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m8_tumu(mask, maskedoff, src, value, vl);
+vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf8_mu(mask, maskedoff, src, value, vl);
+vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf4_mu(mask, maskedoff, src, value, vl);
+vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8mf2_mu(mask, maskedoff, src, value, vl);
+vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m1_mu(mask, maskedoff, src, value, vl);
+vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m2_mu(mask, maskedoff, src, value, vl);
+vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m4_mu(mask, maskedoff, src, value, vl);
+vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i8m8_mu(mask, maskedoff, src, value, vl);
+vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16mf4_mu(mask, maskedoff, src, value, vl);
+vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16mf2_mu(mask, maskedoff, src, value, vl);
+vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m1_mu(mask, maskedoff, src, value, vl);
+vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m2_mu(mask, maskedoff, src, value, vl);
+vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m4_mu(mask, maskedoff, src, value, vl);
+vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i16m8_mu(mask, maskedoff, src, value, vl);
+vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32mf2_mu(mask, maskedoff, src, value, vl);
+vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m1_mu(mask, maskedoff, src, value, vl);
+vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m2_mu(mask, maskedoff, src, value, vl);
+vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m4_mu(mask, maskedoff, src, value, vl);
+vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i32m8_mu(mask, maskedoff, src, value, vl);
+vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m1_mu(mask, maskedoff, src, value, vl);
+vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m2_mu(mask, maskedoff, src, value, vl);
+vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
  return __riscv_vslide1up_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m4_mu(mask, maskedoff, src, value, vl);
+vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_i64m8_mu(mask, maskedoff, src, value, vl);
+vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf8_mu(mask, maskedoff, src, value, vl);
+vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf4_mu(mask, maskedoff, src, value, vl);
+vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8mf2_mu(mask, maskedoff, src, value, vl);
+vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m1_mu(mask, maskedoff, src, value, vl);
+vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m2_mu(mask, maskedoff, src, value, vl);
+vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m4_mu(mask, maskedoff, src, value, vl);
+vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u8m8_mu(mask, maskedoff, src, value, vl);
+vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf4_mu(mask, maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16mf2_mu(mask, maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m1_mu(mask, maskedoff, src, value, vl);
+vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m2_mu(mask, maskedoff, src, value, vl);
+vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m4_mu(mask, maskedoff, src, value, vl);
+vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u16m8_mu(mask, maskedoff, src, value, vl);
+vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32mf2_mu(mask, maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m1_mu(mask, maskedoff, src, value, vl);
+vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m2_mu(mask, maskedoff, src, value, vl);
+vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m4_mu(mask, maskedoff, src, value, vl);
+vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u32m8_mu(mask, maskedoff, src, value, vl);
+vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m1_mu(mask, maskedoff, src, value, vl);
+vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m2_mu(mask, maskedoff, src, value, vl);
+vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m4_mu(mask, maskedoff, src, value, vl);
+vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1up_vx_u64m8_mu(mask, maskedoff, src, value, vl);
+vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1up_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1up\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vslidedown.c b/auto-generated/policy_funcs/gnu-api-tests/vslidedown.c
index 48fbadfb5..d82dfd09c 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vslidedown.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vslidedown.c
@@ -6,948 +6,948 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f16mf4_tu(maskedoff, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f16mf4_tu(vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f16mf2_tu(maskedoff, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f16mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f16m1_tu(maskedoff, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f16m1_tu(vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f16m2_tu(maskedoff, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f16m2_tu(vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f16m4_tu(maskedoff, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f16m4_tu(vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f16m8_tu(maskedoff, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f16m8_tu(vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f32mf2_tu(maskedoff, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f32mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f32m1_tu(maskedoff, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f32m1_tu(vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f32m2_tu(maskedoff, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f32m2_tu(vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f32m4_tu(maskedoff, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f32m4_tu(vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f32m8_tu(maskedoff, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f32m8_tu(vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f64m1_tu(maskedoff, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f64m1_tu(vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f64m2_tu(maskedoff, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f64m2_tu(vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f64m4_tu(maskedoff, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f64m4_tu(vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_f64m8_tu(maskedoff, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_f64m8_tu(vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i8mf8_tu(maskedoff, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i8mf4_tu(maskedoff, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i8mf2_tu(maskedoff, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i8m1_tu(maskedoff, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i8m1_tu(vd, vs2, rs1, vl);
 }
-vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i8m2_tu(maskedoff, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i8m2_tu(vd, vs2, rs1, vl);
 }
-vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i8m4_tu(maskedoff, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i8m4_tu(vd, vs2, rs1, vl);
 }
-vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i8m8_tu(maskedoff, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i8m8_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i16mf4_tu(maskedoff, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i16mf4_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i16mf2_tu(maskedoff, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i16mf2_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i16m1_tu(maskedoff, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i16m1_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i16m2_tu(maskedoff, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i16m2_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i16m4_tu(maskedoff, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i16m4_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i16m8_tu(maskedoff, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i16m8_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i32mf2_tu(maskedoff, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i32mf2_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i32m1_tu(maskedoff, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i32m1_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i32m2_tu(maskedoff, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i32m2_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i32m4_tu(maskedoff, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i32m4_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i32m8_tu(maskedoff, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i32m8_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i64m1_tu(maskedoff, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i64m1_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i64m2_tu(maskedoff, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i64m2_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i64m4_tu(maskedoff, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i64m4_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_i64m8_tu(maskedoff, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_i64m8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u8mf8_tu(maskedoff, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u8mf4_tu(maskedoff, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u8mf2_tu(maskedoff, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u8m1_tu(maskedoff, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u8m1_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u8m2_tu(maskedoff, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u8m2_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u8m4_tu(maskedoff, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u8m4_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u8m8_tu(maskedoff, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u8m8_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u16mf4_tu(maskedoff, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u16mf2_tu(maskedoff, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t
rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m1_tu(maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m2_tu(maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m4_tu(maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m8_tu(maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32mf2_tu(maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m1_tu(maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m2_tu(maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m4_tu(maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m8_tu(maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m1_tu(maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, 
size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m2_tu(maskedoff, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m4_tu(maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m8_tu(maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf4_tum(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf2_tum(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m1_tum(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m2_tum(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m4_tum(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m8_tum(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32mf2_tum(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t vm, 
vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m1_tum(mask, maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m2_tum(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m4_tum(mask, maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m8_tum(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m1_tum(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m2_tum(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m4_tum(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m8_tum(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf8_tum(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vslidedown_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf4_tum(mask, maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf2_tum(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m1_tum(mask, maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m2_tum(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m4_tum(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m8_tum(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf4_tum(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf2_tum(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m1_tum(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return 
__riscv_vslidedown_vx_i16m2_tum(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m4_tum(mask, maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m8_tum(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32mf2_tum(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m1_tum(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m2_tum(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m4_tum(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m8_tum(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m1_tum(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m2_tum(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vslidedown_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m4_tum(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m8_tum(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf8_tum(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf4_tum(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf2_tum(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m1_tum(mask, maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m2_tum(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m4_tum(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m8_tum(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t 
src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf4_tum(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf2_tum(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m1_tum(mask, maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m2_tum(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m4_tum(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m8_tum(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32mf2_tum(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m1_tum(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m2_tum(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m4_tum(mask, maskedoff, src, 
offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m8_tum(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m1_tum(mask, maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m2_tum(mask, maskedoff, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m4_tum(mask, maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m8_tum(mask, maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf4_tumu(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf2_tumu(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m1_tumu(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m2_tumu(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t 
vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m4_tumu(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m8_tumu(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32mf2_tumu(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m1_tumu(mask, maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m2_tumu(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m4_tumu(mask, maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m8_tumu(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m1_tumu(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m2_tumu(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, 
vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m4_tumu(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m8_tumu(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf8_tumu(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf4_tumu(mask, maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf2_tumu(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m1_tumu(mask, maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m2_tumu(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m4_tumu(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m8_tumu(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t 
test_vslidedown_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf4_tumu(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf2_tumu(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m1_tumu(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m2_tumu(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m4_tumu(mask, maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m8_tumu(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32mf2_tumu(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m1_tumu(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m2_tumu(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { 
- return __riscv_vslidedown_vx_i32m4_tumu(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m8_tumu(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m1_tumu(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m2_tumu(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m4_tumu(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m8_tumu(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf8_tumu(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf4_tumu(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf2_tumu(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m1_tumu(mask, maskedoff, src, offset, vl); +vuint8m1_t 
test_vslidedown_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m2_tumu(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m4_tumu(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m8_tumu(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf4_tumu(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf2_tumu(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m1_tumu(mask, maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m2_tumu(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m4_tumu(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m8_tumu(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vslidedown_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32mf2_tumu(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m1_tumu(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m2_tumu(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m4_tumu(mask, maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m8_tumu(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m1_tumu(mask, maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m2_tumu(mask, maskedoff, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m4_tumu(mask, maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m8_tumu(mask, maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } 
-vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf4_mu(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16mf2_mu(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m1_mu(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m2_mu(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m4_mu(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m8_mu(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32mf2_mu(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m1_mu(mask, maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m2_mu(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t 
maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m4_mu(mask, maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m8_mu(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m1_mu(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m2_mu(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m4_mu(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m8_mu(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_f64m8_mu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf8_mu(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf4_mu(mask, maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8mf2_mu(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m1_mu(mask, maskedoff, src, offset, vl); +vint8m1_t 
test_vslidedown_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m2_mu(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m4_mu(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m8_mu(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf4_mu(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16mf2_mu(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m1_mu(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m2_mu(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m4_mu(mask, maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m8_mu(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t 
maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32mf2_mu(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m1_mu(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m2_mu(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m4_mu(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m8_mu(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m1_mu(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m2_mu(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m4_mu(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m8_mu(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf8_mu(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, 
vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf4_mu(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8mf2_mu(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m1_mu(mask, maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m2_mu(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m4_mu(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m8_mu(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf4_mu(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16mf2_mu(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m1_mu(mask, maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t mask, vuint16m2_t 
maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m2_mu(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m4_mu(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m8_mu(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32mf2_mu(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m1_mu(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m2_mu(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m4_mu(mask, maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m8_mu(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m1_mu(mask, maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m2_mu(mask, maskedoff, src, offset, vl); +vuint64m2_t 
test_vslidedown_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u64m4_mu(mask, maskedoff, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_vx_u64m8_mu(mask, maskedoff, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslidedown\.[ivxfswum.]+\s+} 236 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vslideup.c b/auto-generated/policy_funcs/gnu-api-tests/vslideup.c
index 913d6e9a0..1154c068b 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vslideup.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vslideup.c
@@ -6,948 +6,948 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf4_tu(dest, src, offset, vl);
+vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf4_tu(vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf2_tu(dest, src, offset, vl);
+vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf2_tu(vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m1_tu(dest, src, offset, vl);
+vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m1_tu(vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m2_tu(dest, src, offset, vl);
+vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m2_tu(vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m4_tu(dest, src, offset, vl);
+vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m4_tu(vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m8_tu(dest, src, offset, vl);
+vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m8_tu(vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return
__riscv_vslideup_vx_f32mf2_tu(dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m1_tu(dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m2_tu(dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m4_tu(dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m8_tu(dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m1_tu(dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m2_tu(dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m4_tu(dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m8_tu(dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf8_tu(dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf4_tu(dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - 
return __riscv_vslideup_vx_i8mf2_tu(dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m1_tu(dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m2_tu(dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m4_tu(dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m8_tu(dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf4_tu(dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf2_tu(dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m1_tu(dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m2_tu(dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m4_tu(dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m8_tu(dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32mf2_tu(dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t vd, 
vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m1_tu(dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m2_tu(dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m4_tu(dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m8_tu(dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m1_tu(dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m2_tu(dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m4_tu(dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m8_tu(dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf8_tu(dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf4_tu(dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf2_tu(dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vslideup_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m1_tu(dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m2_tu(dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m4_tu(dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m8_tu(dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16mf4_tu(dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16mf2_tu(dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m1_tu(dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m2_tu(dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m4_tu(dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m8_tu(dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32mf2_tu(dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32mf2_tu(vd, vs2, rs1, 
vl); } -vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m1_tu(dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m2_tu(dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m4_tu(dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m8_tu(dest, src, offset, vl); +vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m1_tu(dest, src, offset, vl); +vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m2_tu(dest, src, offset, vl); +vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m4_tu(dest, src, offset, vl); +vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u64m8_tu(dest, src, offset, vl); +vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16mf4_tum(mask, dest, src, offset, vl); +vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16mf2_tum(mask, dest, src, offset, vl); +vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m1_tum(mask, dest, src, offset, vl); +vfloat16m1_t 
test_vslideup_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m2_tum(mask, dest, src, offset, vl); +vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m4_tum(mask, dest, src, offset, vl); +vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f16m8_tum(mask, dest, src, offset, vl); +vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32mf2_tum(mask, dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m1_tum(mask, dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m2_tum(mask, dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m4_tum(mask, dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f32m8_tum(mask, dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m1_tum(mask, dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t mask, 
vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m2_tum(mask, dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m4_tum(mask, dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_f64m8_tum(mask, dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_f64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf8_tum(mask, dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf4_tum(mask, dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8mf2_tum(mask, dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m1_tum(mask, dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m2_tum(mask, dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m4_tum(mask, dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i8m8_tum(mask, dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t 
test_vslideup_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf4_tum(mask, dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16mf2_tum(mask, dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m1_tum(mask, dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m2_tum(mask, dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m4_tum(mask, dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i16m8_tum(mask, dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32mf2_tum(mask, dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m1_tum(mask, dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m2_tum(mask, dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m4_tum(mask, dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vslideup_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i32m8_tum(mask, dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m1_tum(mask, dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m2_tum(mask, dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m4_tum(mask, dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_i64m8_tum(mask, dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf8_tum(mask, dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf4_tum(mask, dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8mf2_tum(mask, dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m1_tum(mask, dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m2_tum(mask, dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m4_tum(mask, dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u8m8_tum(mask, dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16mf4_tum(mask, dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16mf2_tum(mask, dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m1_tum(mask, dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m2_tum(mask, dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m4_tum(mask, dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u16m8_tum(mask, dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32mf2_tum(mask, dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_vx_u32m1_tum(mask, dest, src, offset, vl); 
+vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m2_tum(mask, dest, src, offset, vl);
+vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m4_tum(mask, dest, src, offset, vl);
+vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m8_tum(mask, dest, src, offset, vl);
+vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m1_tum(mask, dest, src, offset, vl);
+vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m2_tum(mask, dest, src, offset, vl);
+vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m4_tum(mask, dest, src, offset, vl);
+vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m8_tum(mask, dest, src, offset, vl);
+vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf4_tumu(mask, dest, src, offset, vl);
+vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf2_tumu(mask, dest, src, offset, vl);
+vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m1_tumu(mask, dest, src, offset, vl);
+vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m2_tumu(mask, dest, src, offset, vl);
+vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m4_tumu(mask, dest, src, offset, vl);
+vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m8_tumu(mask, dest, src, offset, vl);
+vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32mf2_tumu(mask, dest, src, offset, vl);
+vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m1_tumu(mask, dest, src, offset, vl);
+vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m2_tumu(mask, dest, src, offset, vl);
+vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m4_tumu(mask, dest, src, offset, vl);
+vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m8_tumu(mask, dest, src, offset, vl);
+vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m1_tumu(mask, dest, src, offset, vl);
+vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m2_tumu(mask, dest, src, offset, vl);
+vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m4_tumu(mask, dest, src, offset, vl);
+vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m8_tumu(mask, dest, src, offset, vl);
+vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8mf8_tumu(mask, dest, src, offset, vl);
+vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8mf4_tumu(mask, dest, src, offset, vl);
+vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8mf2_tumu(mask, dest, src, offset, vl);
+vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m1_tumu(mask, dest, src, offset, vl);
+vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m2_tumu(mask, dest, src, offset, vl);
+vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m4_tumu(mask, dest, src, offset, vl);
+vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m8_tumu(mask, dest, src, offset, vl);
+vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16mf4_tumu(mask, dest, src, offset, vl);
+vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16mf2_tumu(mask, dest, src, offset, vl);
+vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m1_tumu(mask, dest, src, offset, vl);
+vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m2_tumu(mask, dest, src, offset, vl);
+vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m4_tumu(mask, dest, src, offset, vl);
+vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m8_tumu(mask, dest, src, offset, vl);
+vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32mf2_tumu(mask, dest, src, offset, vl);
+vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m1_tumu(mask, dest, src, offset, vl);
+vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m2_tumu(mask, dest, src, offset, vl);
+vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m4_tumu(mask, dest, src, offset, vl);
+vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m8_tumu(mask, dest, src, offset, vl);
+vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m1_tumu(mask, dest, src, offset, vl);
+vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m2_tumu(mask, dest, src, offset, vl);
+vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m4_tumu(mask, dest, src, offset, vl);
+vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m8_tumu(mask, dest, src, offset, vl);
+vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8mf8_tumu(mask, dest, src, offset, vl);
+vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8mf4_tumu(mask, dest, src, offset, vl);
+vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8mf2_tumu(mask, dest, src, offset, vl);
+vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m1_tumu(mask, dest, src, offset, vl);
+vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m2_tumu(mask, dest, src, offset, vl);
+vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m4_tumu(mask, dest, src, offset, vl);
+vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m8_tumu(mask, dest, src, offset, vl);
+vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16mf4_tumu(mask, dest, src, offset, vl);
+vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16mf2_tumu(mask, dest, src, offset, vl);
+vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m1_tumu(mask, dest, src, offset, vl);
+vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m2_tumu(mask, dest, src, offset, vl);
+vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m4_tumu(mask, dest, src, offset, vl);
+vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m8_tumu(mask, dest, src, offset, vl);
+vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32mf2_tumu(mask, dest, src, offset, vl);
+vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m1_tumu(mask, dest, src, offset, vl);
+vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m2_tumu(mask, dest, src, offset, vl);
+vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m4_tumu(mask, dest, src, offset, vl);
+vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m8_tumu(mask, dest, src, offset, vl);
+vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m1_tumu(mask, dest, src, offset, vl);
+vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m2_tumu(mask, dest, src, offset, vl);
+vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m4_tumu(mask, dest, src, offset, vl);
+vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m8_tumu(mask, dest, src, offset, vl);
+vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf4_mu(mask, dest, src, offset, vl);
+vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16mf2_mu(mask, dest, src, offset, vl);
+vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m1_mu(mask, dest, src, offset, vl);
+vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m2_mu(mask, dest, src, offset, vl);
+vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m4_mu(mask, dest, src, offset, vl);
+vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f16m8_mu(mask, dest, src, offset, vl);
+vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32mf2_mu(mask, dest, src, offset, vl);
+vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m1_mu(mask, dest, src, offset, vl);
+vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m2_mu(mask, dest, src, offset, vl);
+vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m4_mu(mask, dest, src, offset, vl);
+vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f32m8_mu(mask, dest, src, offset, vl);
+vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m1_mu(mask, dest, src, offset, vl);
+vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m2_mu(mask, dest, src, offset, vl);
+vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m4_mu(mask, dest, src, offset, vl);
+vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_f64m8_mu(mask, dest, src, offset, vl);
+vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8mf8_mu(mask, dest, src, offset, vl);
+vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8mf4_mu(mask, dest, src, offset, vl);
+vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8mf2_mu(mask, dest, src, offset, vl);
+vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m1_mu(mask, dest, src, offset, vl);
+vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m2_mu(mask, dest, src, offset, vl);
+vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m4_mu(mask, dest, src, offset, vl);
+vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i8m8_mu(mask, dest, src, offset, vl);
+vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16mf4_mu(mask, dest, src, offset, vl);
+vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16mf2_mu(mask, dest, src, offset, vl);
+vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m1_mu(mask, dest, src, offset, vl);
+vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m2_mu(mask, dest, src, offset, vl);
+vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m4_mu(mask, dest, src, offset, vl);
+vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i16m8_mu(mask, dest, src, offset, vl);
+vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32mf2_mu(mask, dest, src, offset, vl);
+vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m1_mu(mask, dest, src, offset, vl);
+vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m2_mu(mask, dest, src, offset, vl);
+vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m4_mu(mask, dest, src, offset, vl);
+vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i32m8_mu(mask, dest, src, offset, vl);
+vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m1_mu(mask, dest, src, offset, vl);
+vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m2_mu(mask, dest, src, offset, vl);
+vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m4_mu(mask, dest, src, offset, vl);
+vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_i64m8_mu(mask, dest, src, offset, vl);
+vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8mf8_mu(mask, dest, src, offset, vl);
+vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8mf4_mu(mask, dest, src, offset, vl);
+vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8mf2_mu(mask, dest, src, offset, vl);
+vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m1_mu(mask, dest, src, offset, vl);
+vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m2_mu(mask, dest, src, offset, vl);
+vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m4_mu(mask, dest, src, offset, vl);
+vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u8m8_mu(mask, dest, src, offset, vl);
+vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16mf4_mu(mask, dest, src, offset, vl);
+vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16mf2_mu(mask, dest, src, offset, vl);
+vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m1_mu(mask, dest, src, offset, vl);
+vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m2_mu(mask, dest, src, offset, vl);
+vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m4_mu(mask, dest, src, offset, vl);
+vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u16m8_mu(mask, dest, src, offset, vl);
+vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32mf2_mu(mask, dest, src, offset, vl);
+vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m1_mu(mask, dest, src, offset, vl);
+vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m2_mu(mask, dest, src, offset, vl);
+vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m4_mu(mask, dest, src, offset, vl);
+vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u32m8_mu(mask, dest, src, offset, vl);
+vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m1_mu(mask, dest, src, offset, vl);
+vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m2_mu(mask, dest, src, offset, vl);
+vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m4_mu(mask, dest, src, offset, vl);
+vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslideup_vx_u64m8_mu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_vx_u64m8_mu(mask, dest, src, offset, vl);
+vuint64m8_t test_vslideup_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslideup\.[ivxfswum.]+\s+} 236 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsll.c b/auto-generated/policy_funcs/gnu-api-tests/vsll.c
index 23b3e84b9..00b943818 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsll.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsll.c
@@ -6,1412 +6,1412 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf8_tu(maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf8_tu(maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf4_tu(maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf4_tu(maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf2_tu(maskedoff, op1, shift, vl);
+vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf2_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf2_tu(maskedoff, op1, shift, vl);
+vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m1_tu(maskedoff, op1, shift, vl);
+vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m1_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m1_tu(maskedoff, op1, shift, vl);
+vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m1_tu(vd, vs2, rs1, vl);
 }
-vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m2_tu(maskedoff, op1, shift, vl);
+vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m2_tu(vd, vs2, vs1, vl);
 }
-vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m2_tu(maskedoff, op1, shift, vl);
+vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m2_tu(vd, vs2, rs1, vl);
 }
-vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m4_tu(maskedoff, op1, shift, vl);
+vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m4_tu(vd, vs2, vs1, vl);
 }
-vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m4_tu(maskedoff, op1, shift, vl);
+vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m4_tu(vd, vs2, rs1, vl);
 }
-vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_vv_i8m8_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m8_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_vv_i16mf4_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16mf4_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_vv_i16mf2_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16mf2_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_vv_i16m1_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m1_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_vv_i16m2_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m2_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_vv_i16m4_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - 
return __riscv_vsll_vx_i16m4_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_vv_i16m8_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m8_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_vv_i32mf2_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32mf2_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_vv_i32m1_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m1_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_vv_i32m2_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m2_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_vv_i32m4_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m4_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_vv_i32m8_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t vd, 
vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m8_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_vv_i64m1_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m1_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_vv_i64m2_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m2_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_vv_i64m4_tu(maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m4_tu(maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_vv_i64m8_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m8_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf8_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf8_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t 
test_vsll_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf4_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf4_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf2_tu(maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf2_tu(maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_vv_u8m1_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m1_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_vv_u8m2_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m2_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_vv_u8m4_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m4_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_vv_u8m8_tu(maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m8_tu(maskedoff, op1, shift, 
vl); +vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_vv_u16mf4_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16mf4_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u16mf2_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16mf2_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_vv_u16m1_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m1_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_vv_u16m2_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m2_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_vv_u16m4_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m4_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_vv_u16m8_tu(maskedoff, op1, shift, vl); +vuint16m8_t 
+vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m8_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m8_tu(maskedoff, op1, shift, vl);
+vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m8_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32mf2_tu(maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32mf2_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32mf2_tu(maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m1_tu(maskedoff, op1, shift, vl);
+vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m1_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m1_tu(maskedoff, op1, shift, vl);
+vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m1_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m2_tu(maskedoff, op1, shift, vl);
+vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m2_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m2_tu(maskedoff, op1, shift, vl);
+vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m2_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m4_tu(maskedoff, op1, shift, vl);
+vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m4_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m4_tu(maskedoff, op1, shift, vl);
+vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m4_tu(vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m8_tu(maskedoff, op1, shift, vl);
+vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m8_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m8_tu(maskedoff, op1, shift, vl);
+vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m8_tu(vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m1_tu(maskedoff, op1, shift, vl);
+vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m1_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m1_tu(maskedoff, op1, shift, vl);
+vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m1_tu(vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m2_tu(maskedoff, op1, shift, vl);
+vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m2_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m2_tu(maskedoff, op1, shift, vl);
+vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m2_tu(vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m4_tu(maskedoff, op1, shift, vl);
+vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m4_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m4_tu(maskedoff, op1, shift, vl);
+vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m4_tu(vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m8_tu(maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m8_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m8_tu(maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m8_tu(vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl);
+vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl);
+vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m1_tum(mask, maskedoff, op1, shift, vl);
+vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m1_tum(mask, maskedoff, op1, shift, vl);
+vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m2_tum(mask, maskedoff, op1, shift, vl);
+vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m2_tum(mask, maskedoff, op1, shift, vl);
+vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m4_tum(mask, maskedoff, op1, shift, vl);
+vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m4_tum(mask, maskedoff, op1, shift, vl);
+vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m8_tum(mask, maskedoff, op1, shift, vl);
+vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m8_tum(mask, maskedoff, op1, shift, vl);
+vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl);
+vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl);
+vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl);
+vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl);
+vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m1_tum(mask, maskedoff, op1, shift, vl);
+vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m1_tum(mask, maskedoff, op1, shift, vl);
+vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m2_tum(mask, maskedoff, op1, shift, vl);
+vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m2_tum(mask, maskedoff, op1, shift, vl);
+vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m4_tum(mask, maskedoff, op1, shift, vl);
+vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m4_tum(mask, maskedoff, op1, shift, vl);
+vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m8_tum(mask, maskedoff, op1, shift, vl);
+vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m8_tum(mask, maskedoff, op1, shift, vl);
+vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl);
+vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl);
+vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m1_tum(mask, maskedoff, op1, shift, vl);
+vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m1_tum(mask, maskedoff, op1, shift, vl);
+vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m2_tum(mask, maskedoff, op1, shift, vl);
+vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m2_tum(mask, maskedoff, op1, shift, vl);
+vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m4_tum(mask, maskedoff, op1, shift, vl);
+vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m4_tum(mask, maskedoff, op1, shift, vl);
+vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m8_tum(mask, maskedoff, op1, shift, vl);
+vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m8_tum(mask, maskedoff, op1, shift, vl);
+vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m1_tum(mask, maskedoff, op1, shift, vl);
+vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m1_tum(mask, maskedoff, op1, shift, vl);
+vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m2_tum(mask, maskedoff, op1, shift, vl);
+vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m2_tum(mask, maskedoff, op1, shift, vl);
+vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m4_tum(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m4_tum(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m8_tum(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m8_tum(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m1_tum(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m1_tum(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m2_tum(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m2_tum(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m4_tum(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m4_tum(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m8_tum(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m8_tum(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m1_tum(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m1_tum(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m2_tum(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m2_tum(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m4_tum(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m4_tum(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m8_tum(mask, maskedoff, op1, shift, vl);
+vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16m8_tum(mask, maskedoff, op1, shift, vl);
+vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m1_tum(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m1_tum(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m2_tum(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m2_tum(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m4_tum(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m4_tum(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u32m8_tum(mask, maskedoff, op1, shift, vl);
+vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u32m8_tum(mask, maskedoff, op1, shift, vl);
+vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m1_tum(mask, maskedoff, op1, shift, vl);
+vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m1_tum(mask, maskedoff, op1, shift, vl);
+vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m2_tum(mask, maskedoff, op1, shift, vl);
+vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m2_tum(mask, maskedoff, op1, shift, vl);
+vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m4_tum(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m4_tum(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m8_tum(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m8_tum(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl);
+vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl);
+vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl);
+vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl);
+vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl);
+vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl);
+vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl);
+vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl);
+vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl);
+vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl);
+vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl);
+vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl);
+vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl);
+vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl);
+vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl);
+vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl);
+vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl);
+vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl);
+vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl);
+vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl);
+vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl);
+vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl);
+vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl);
+vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl);
+vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl);
+vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl);
+vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl);
+vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl);
+vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl);
+vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl);
+vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl);
+vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl);
+vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl);
+vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl);
+vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl);
+vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl);
+vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsll_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
- return __riscv_vsll_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t 
test_vsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, 
size_t vl) { + return __riscv_vsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, 
vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t 
test_vsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t 
test_vsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t 
mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_vv_i64m4_mu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m4_mu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_vv_i64m8_mu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_i64m8_mu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t 
op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return 
__riscv_vsll_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t 
shift, size_t vl) { - return __riscv_vsll_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t 
maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t mask, 
vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m4_mu(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsll_vv_u64m8_mu(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_vx_u64m8_mu(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsll\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsmul.c b/auto-generated/policy_funcs/gnu-api-tests/vsmul.c
index 28a39fd5d..66dfe60f5 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsmul.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsmul.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i8mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return
__riscv_vsmul_vx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, 
size_t vl) { - return __riscv_vsmul_vv_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_tu(maskedoff, 
op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t 
test_vsmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_tum(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, 
size_t vl) { + return __riscv_vsmul_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + 
return __riscv_vsmul_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vsmul_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m2_tum(vm, vd, vs2, 
vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t 
op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t 
maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t 
test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t 
test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, 
vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t 
test_vsmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t vm, 
vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t 
vl) { + return __riscv_vsmul_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_vx_i32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, 
vl); }

-vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsmul_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsmul_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsmul\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsra.c b/auto-generated/policy_funcs/gnu-api-tests/vsra.c
index 4c7acf85d..a7a2cae14 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsra.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsra.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsra_vv_i8mf8_tu(maskedoff, op1, shift, vl);
+vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsra_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_vx_i8mf8_tu(maskedoff, op1, shift, vl);
+vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsra_vv_i8mf4_tu(maskedoff, op1, shift, vl);
+vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsra_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_vx_i8mf4_tu(maskedoff, op1, shift, vl);
+vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsra_vv_i8mf2_tu(maskedoff, op1, shift, vl);
+vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsra_vv_i8mf2_tu(vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_vx_i8mf2_tu(maskedoff, op1, shift, vl);
+vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsra_vv_i8m1_tu(maskedoff, op1, shift, vl);
+vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsra_vv_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { -
return __riscv_vsra_vx_i8m1_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_vv_i8m2_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m2_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_vv_i8m4_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m4_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_vv_i8m8_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m8_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf4_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf4_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf2_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf2_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_vv_i16m1_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + 
return __riscv_vsra_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m1_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_vv_i16m2_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m2_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_vv_i16m4_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m4_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_vv_i16m8_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m8_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i32mf2_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32mf2_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_vv_i32m1_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m1_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t maskedoff, 
vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_vv_i32m2_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m2_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_vv_i32m4_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m4_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_vv_i32m8_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m8_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_vv_i64m1_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m1_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_vv_i64m2_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m2_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra_vv_i64m4_tu(maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m4_tu(maskedoff, op1, shift, vl); +vint64m4_t 
test_vsra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra_vv_i64m8_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m8_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf8_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf8_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf4_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf4_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf2_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf2_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_vv_i8m1_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m1_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m1_tum(vm, vd, 
vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_vv_i8m2_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m2_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_vv_i8m4_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m4_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_vv_i8m8_tum(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m8_tum(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf4_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf4_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf2_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf2_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t 
test_vsra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_vv_i16m1_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m1_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_vv_i16m2_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m2_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_vv_i16m4_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m4_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_vv_i16m8_tum(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m8_tum(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i32mf2_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32mf2_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } 
-vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_vv_i32m1_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m1_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_vv_i32m2_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m2_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_vv_i32m4_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m4_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_vv_i32m8_tum(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m8_tum(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_vv_i64m1_tum(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m1_tum(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t 
test_vsra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_vv_i64m2_tum(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m2_tum(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra_vv_i64m4_tum(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m4_tum(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra_vv_i64m8_tum(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m8_tum(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } 
-vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_vv_i8m1_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m1_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_vv_i8m2_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m2_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_vv_i8m4_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m4_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_vv_i8m8_tumu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m8_tumu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t 
maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_vv_i16m1_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m1_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_vv_i16m2_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m2_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_vv_i16m4_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m4_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m4_tumu(vm, vd, vs2, rs1, 
vl); } -vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_vv_i16m8_tumu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m8_tumu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_vv_i32m1_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m1_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_vv_i32m2_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m2_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_vv_i32m4_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m4_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vsra_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_vv_i32m8_tumu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m8_tumu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_vv_i64m1_tumu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m1_tumu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_vv_i64m2_tumu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m2_tumu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra_vv_i64m4_tumu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m4_tumu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra_vv_i64m8_tumu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m8_tumu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf8_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf8_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf4_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf4_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i8mf2_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8mf2_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_vv_i8m1_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m1_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_vv_i8m2_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m2_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t 
test_vsra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_vv_i8m4_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m4_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_vv_i8m8_mu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i8m8_mu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf4_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf4_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i16mf2_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16mf2_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_vv_i16m1_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m1_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t mask, vint16m2_t 
maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_vv_i16m2_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m2_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_vv_i16m4_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m4_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_vv_i16m8_mu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i16m8_mu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_vv_i32mf2_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32mf2_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_vv_i32m1_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m1_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, 
vuint32m2_t shift, size_t vl) { - return __riscv_vsra_vv_i32m2_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m2_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_vv_i32m4_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m4_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_vv_i32m8_mu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i32m8_mu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_vv_i64m1_mu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m1_mu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_vv_i64m2_mu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_vx_i64m2_mu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return 
__riscv_vsra_vv_i64m4_mu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsra_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_vx_i64m4_mu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsra_vv_i64m8_mu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsra_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_vx_i64m8_mu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsra\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsrl.c b/auto-generated/policy_funcs/gnu-api-tests/vsrl.c
index 72132c884..d568fb449 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsrl.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsrl.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsrl_vv_u8mf8_tu(maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsrl_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_vx_u8mf8_tu(maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsrl_vv_u8mf4_tu(maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsrl_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_vx_u8mf4_tu(maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsrl_vv_u8mf2_tu(maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_vv_u8mf2_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_vx_u8mf2_tu(maskedoff, op1, shift, vl);
+vuint8mf2_t
test_vsrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m1_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m1_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m2_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m2_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m4_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m4_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m8_tu(maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m8_tu(maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf4_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf4_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf2_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return 
__riscv_vsrl_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf2_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m1_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m1_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m2_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m2_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m4_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m4_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m8_tu(maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m8_tu(maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32mf2_tu(maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32mf2_tu(maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32mf2_tu(vd, vs2, rs1, 
vl); } -vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m1_tu(maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m1_tu(maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m2_tu(maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m2_tu(maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m4_tu(maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m4_tu(maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m8_tu(maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m8_tu(maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m1_tu(maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m1_tu(maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m2_tu(maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t 
maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m2_tu(maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m4_tu(maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m4_tu(maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m8_tu(maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m8_tu(maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, vl); +vuint8mf2_t 
test_vsrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m1_tum(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m1_tum(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m2_tum(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m2_tum(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m4_tum(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m4_tum(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m8_tum(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m8_tum(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, vl); +vuint16mf4_t 
test_vsrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m1_tum(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m1_tum(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m2_tum(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m2_tum(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m4_tum(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m4_tum(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m8_tum(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return 
__riscv_vsrl_vx_u16m8_tum(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m1_tum(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m1_tum(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m2_tum(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m2_tum(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m4_tum(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m4_tum(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m8_tum(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t mask, 
vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m8_tum(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m1_tum(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m1_tum(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m2_tum(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m2_tum(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m4_tum(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m4_tum(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m8_tum(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m8_tum(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); 
} -vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return 
__riscv_vsrl_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t 
test_vsrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, 
vuint32m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } 
-vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vsrl_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m1_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m1_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m2_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m2_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m4_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m4_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u8m8_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u8m8_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t 
test_vsrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m1_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m1_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m2_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m2_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m4_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m4_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u16m8_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u16m8_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u16m8_mu(vm, vd, vs2, 
rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m1_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m1_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m2_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m2_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m4_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m4_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u32m8_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u32m8_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vsrl_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m1_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m1_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m2_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m2_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m4_mu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m4_mu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsrl_vv_u64m8_mu(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsrl_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_vx_u64m8_mu(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsrl\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vssra.c b/auto-generated/policy_funcs/gnu-api-tests/vssra.c index df93be2ae..f91ba34ca 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vssra.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vssra.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return 
__riscv_vssra_vv_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t 
test_vssra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t vd, 
vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, 
vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return 
__riscv_vssra_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, 
vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_vx_i16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, 
size_t vl) { - return __riscv_vssra_vv_i16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t 
op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, 
vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, 
vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t 
shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, 
vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_vv_i64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_vv_i64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_vv_i64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_vv_i64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i64m8_tumu(vm, vd, 
vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_vv_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t 
test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_vv_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_vv_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_vv_i8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_vv_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, 
vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_vv_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_vv_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_vv_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_vv_i16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, 
size_t vl) { - return __riscv_vssra_vv_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_vv_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_vv_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_vv_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_vx_i32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_vv_i32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_vx_i32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vssra_vv_i64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssra_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_vx_i64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssra\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vssrl.c b/auto-generated/policy_funcs/gnu-api-tests/vssrl.c
index 290c9fcaf..c2eaadd5d 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vssrl.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vssrl.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u8m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16mf4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u16m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u32mf2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return
__riscv_vssrl_vv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t 
vl) { + return __riscv_vssrl_vv_u64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t 
op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - 
return __riscv_vssrl_vx_u8m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t 
test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return 
__riscv_vssrl_vv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, 
vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, 
vl); +vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); 
+vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t 
op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m2_tumu(vm, vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m4_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m8_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t 
test_vssrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u8m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vssrl_vx_u8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t 
test_vssrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u16m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32mf2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m2_mu(mask, maskedoff, op1, shift, 
__RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_vv_u32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_vv_u32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u32m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m1_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl_vv_u64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_vx_u64m2_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_vx_u64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_vv_u64m4_mu(mask, maskedoff, 
op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m4_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vssrl_vv_u64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssrl_vv_u64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_vx_u64m8_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_vx_u64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssrl\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vssub.c b/auto-generated/policy_funcs/gnu-api-tests/vssub.c
index e755064ba..cc7e333ed 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vssub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vssub.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vssub_vv_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_vx_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vssub_vv_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_vx_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vssub_vv_i8mf2_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i8mf2_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t
op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return 
__riscv_vssub_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, 
vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t 
test_vssub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, 
vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return 
__riscv_vssub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - 
return __riscv_vssub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) 
{ - return __riscv_vssub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { 
- return __riscv_vssub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_tumu(mask, maskedoff, op1, 
op2, vl); +vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - 
return __riscv_vssub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t 
maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t 
test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t 
op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_mu(mask, 
maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); 
+vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t vm, 
vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vssub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vssub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vssub_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssub\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vssubu.c b/auto-generated/policy_funcs/gnu-api-tests/vssubu.c
index 9fd9b14ae..169bfeab0 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vssubu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vssubu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t
test_vssubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + 
return __riscv_vssubu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, 
uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t 
vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t 
vs1, size_t vl) { + return __riscv_vssubu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return 
__riscv_vssubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t 
test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, 
vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vssubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return 
__riscv_vssubu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return 
__riscv_vssubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } 
-vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t 
test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t 
+  return __riscv_vssubu_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vssubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssubu_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vssubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vssubu_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssubu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vsub.c b/auto-generated/policy_funcs/gnu-api-tests/vsub.c
index 592d5586f..cfc45f2f6 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vsub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vsub.c
@@ -6,1412 +6,1412 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf8_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf4_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf2_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf2_tu(vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf2_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m1_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m1_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m1_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m1_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m2_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m2_tu(vd, vs2, vs1, vl);
 }

-vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m2_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m2_tu(vd, vs2, rs1, vl);
 }

-vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m4_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m4_tu(vd, vs2, vs1, vl);
 }

-vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m4_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m4_tu(vd, vs2, rs1, vl);
 }

-vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m8_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m8_tu(vd, vs2, vs1, vl);
 }

-vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m8_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m8_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf4_tu(vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf4_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf2_tu(vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf2_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m1_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m2_tu(vd, vs2, vs1, vl);
 }

-vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m2_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m4_tu(vd, vs2, vs1, vl);
 }

-vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m4_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m8_tu(vd, vs2, vs1, vl);
 }

-vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m8_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32mf2_tu(vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32mf2_tu(vd, vs2, rs1, vl);
 }

-vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m1_tu(vd, vs2, rs1, vl);
 }

-vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m2_tu(vd, vs2, vs1, vl);
 }

-vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m2_tu(vd, vs2, rs1, vl);
 }

-vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m4_tu(vd, vs2, vs1, vl);
 }

-vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m4_tu(vd, vs2, rs1, vl);
 }

-vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m8_tu(vd, vs2, vs1, vl);
 }

-vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m8_tu(vd, vs2, rs1, vl);
 }

-vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m1_tu(vd, vs2, rs1, vl);
 }

-vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m2_tu(vd, vs2, vs1, vl);
 }

-vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m2_tu(vd, vs2, rs1, vl);
 }

-vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m4_tu(vd, vs2, vs1, vl);
 }

-vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m4_tu(vd, vs2, rs1, vl);
 }

-vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m8_tu(vd, vs2, vs1, vl);
 }

-vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m8_tu(vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf8_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf4_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf2_tu(vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf2_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m1_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m1_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m1_tu(vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m2_tu(vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m2_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m2_tu(vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m4_tu(vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m4_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m4_tu(vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m8_tu(vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m8_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m8_tu(vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m1_tu(vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m2_tu(vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m2_tu(vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m4_tu(vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m4_tu(vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m8_tu(vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m8_tu(vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32mf2_tu(vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m1_tu(vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m2_tu(vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m2_tu(vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m4_tu(vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m4_tu(vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u32m8_tu(vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_vx_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u32m8_tu(vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m1_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m2_tu(vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m2_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m4_tu(vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m4_tu(vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u64m8_tu(vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_vx_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u64m8_tu(vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf8_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf8_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m1_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m1_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m2_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m2_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m4_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m4_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i8m8_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i8m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsub_vx_i8m8_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i8m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m1_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m2_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m4_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_vx_u8m8_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vsub_vv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_vx_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
test_vsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsub_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsub_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsub_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsub_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsub_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vsub_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsub_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsub_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsub_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsub_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsub_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsub_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, 
size_t vl) { - return __riscv_vsub_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsub_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsub_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsub_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsub_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsub_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsub_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); 
+vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsub_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsub_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, 
vl); +vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m2_tumu(mask, maskedoff, 
op1, op2, vl); +vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, 
vl); +vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsub_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsub_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsub_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsub_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsub_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsub_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vsub_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsub_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsub_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsub_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsub_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsub_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsub_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, 
size_t vl) { - return __riscv_vsub_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsub_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsub_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsub_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsub_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsub_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t 
test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsub_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsub_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsub_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsub_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsub_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) 
{ + return __riscv_vsub_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsub_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsub_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsub_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsub_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsub_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsub_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsub_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, 
vint8mf4_t vs1, size_t vl) { + return __riscv_vsub_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsub_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, 
size_t vl) { - return __riscv_vsub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsub_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m4_mu(mask, 
maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsub_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t 
vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsub_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return 
__riscv_vsub_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vsub_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vsub_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vsub_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vsub_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vsub_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vsub_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return __riscv_vsub_vv_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return __riscv_vsub_vx_u8m1_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return __riscv_vsub_vv_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vsub_vx_u8m2_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return __riscv_vsub_vv_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vsub_vx_u8m4_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return __riscv_vsub_vv_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vsub_vx_u8m8_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return __riscv_vsub_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vsub_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return __riscv_vsub_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vsub_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return __riscv_vsub_vv_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return __riscv_vsub_vx_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return __riscv_vsub_vv_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vsub_vx_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return __riscv_vsub_vv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vsub_vx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return __riscv_vsub_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return __riscv_vsub_vx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return __riscv_vsub_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vsub_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return __riscv_vsub_vv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return __riscv_vsub_vx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return __riscv_vsub_vv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vsub_vx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return __riscv_vsub_vv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return __riscv_vsub_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return __riscv_vsub_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return __riscv_vsub_vx_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return __riscv_vsub_vv_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return __riscv_vsub_vx_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return __riscv_vsub_vv_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return __riscv_vsub_vx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return __riscv_vsub_vv_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return __riscv_vsub_vx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return __riscv_vsub_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+ return __riscv_vsub_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return __riscv_vsub_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vsub_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vsub\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwadd.c b/auto-generated/policy_funcs/gnu-api-tests/vwadd.c
index d033db3df..d4abc39e1 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwadd.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwadd.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16mf4_tu(vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16mf4_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16mf4_tu(vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16mf4_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16mf2_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16mf2_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16mf2_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16mf2_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m1_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m1_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m1_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m1_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m2_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m2_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m2_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m2_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m4_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m4_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m4_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m4_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m8_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m8_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m8_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m8_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32mf2_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32mf2_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32mf2_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32mf2_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m1_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m1_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m1_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m1_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m2_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m2_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m2_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m2_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m4_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m4_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m4_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m4_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m8_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m8_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m8_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m8_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m1_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m1_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m1_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m1_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m2_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m2_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m2_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m2_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m4_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m4_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m4_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m4_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m8_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m8_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m8_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m8_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vwadd_wx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vwadd_wx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t mask,
vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_wv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_wv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_wv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t 
test_vwadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_wv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_wv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwadd_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwadd_wv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_wv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_wv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv_i16m1_mu(vm, vd, vs2, vs1, vl); } 
-vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_wv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_wv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwadd_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_wv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, 
int8_t op2, size_t vl) { - return __riscv_vwadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_wv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_wv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_wv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwadd_wv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_wv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
int16_t op2, size_t vl) { - return __riscv_vwadd_wx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_wv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_wv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_wv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return 
__riscv_vwadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_wv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_wv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_wv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_wv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_wv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_i64m2_mu(mask, 
maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vwadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+ return __riscv_vwadd_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vwadd_wv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+ return __riscv_vwadd_wv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vwadd_wx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vwadd_wx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwaddu.c b/auto-generated/policy_funcs/gnu-api-tests/vwaddu.c
index 95d5bd4f1..5f3caf3f7 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwaddu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwaddu.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint16mf4_t
test_vwaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vwaddu_vv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwaddu_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwaddu_vx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwaddu_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vwaddu_wv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwaddu_wv_u16mf4_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwaddu_wx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwaddu_wx_u16mf4_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vwaddu_vv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwaddu_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwaddu_vx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwaddu_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vwaddu_wv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwaddu_wv_u16mf2_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwaddu_wx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwaddu_wx_u16mf2_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vwaddu_vv_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwaddu_vv_u16m1_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwaddu_vx_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwaddu_vx_u16m1_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vwaddu_wv_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+
return __riscv_vwaddu_wv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u16m8_tu(vd, 
vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, 
size_t vl) { + return __riscv_vwaddu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, 
size_t vl) { + return __riscv_vwaddu_wx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, 
vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u64m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_wv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return 
__riscv_vwaddu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_u16m2_tum(mask, maskedoff, op1, op2, vl); 
+vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m4_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m8_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m1_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m2_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m4_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m8_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m1_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m2_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m4_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m8_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m1_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m2_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m4_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m8_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf4_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m1_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m2_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m4_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u16m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u16m8_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u16m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32mf2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32mf2_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32mf2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m1_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vwaddu_vx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m2_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vwaddu_vx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m4_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vwaddu_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_vwaddu_wv_u32m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u32m8_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_vwaddu_wx_u32m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vwaddu_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vwaddu_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_vwaddu_wv_u64m1_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m1_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_vwaddu_wx_u64m1_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m2_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m2_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m2_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m4_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m4_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m4_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_u64m8_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_u64m8_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add[u]?\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwcvt.c b/auto-generated/policy_funcs/gnu-api-tests/vwcvt.c
index 53281c3dd..104b0170f 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwcvt.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwcvt.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf4_tu(maskedoff, src, vl);
+vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf4_tu(vd, vs2, vl);
 }
-vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf2_tu(maskedoff, src, vl);
+vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf2_tu(vd, vs2, vl);
 }
-vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m1_tu(maskedoff, src, vl);
+vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m1_tu(vd, vs2, vl);
 }
-vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m2_tu(maskedoff, src, vl);
+vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m2_tu(vd, vs2, vl);
 }
-vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m4_tu(maskedoff, src, vl);
+vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m4_tu(vd, vs2, vl);
 }
-vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m8_tu(maskedoff, src, vl);
+vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m8_tu(vd, vs2, vl);
 }
-vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32mf2_tu(maskedoff, src, vl);
+vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32mf2_tu(vd, vs2, vl);
 }
-vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m1_tu(maskedoff, src, vl);
+vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m1_tu(vd, vs2, vl);
 }
-vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m2_tu(maskedoff, src, vl);
+vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m2_tu(vd, vs2, vl);
 }
-vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m4_tu(maskedoff, src, vl);
+vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m4_tu(vd, vs2, vl);
 }
-vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m8_tu(maskedoff, src, vl);
+vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m8_tu(vd, vs2, vl);
 }
-vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m1_tu(maskedoff, src, vl);
+vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m1_tu(vd, vs2, vl);
 }
-vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m2_tu(maskedoff, src, vl);
+vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m2_tu(vd, vs2, vl);
 }
-vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m4_tu(maskedoff, src, vl);
+vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m4_tu(vd, vs2, vl);
 }
-vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m8_tu(maskedoff, src, vl);
+vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m8_tu(vd, vs2, vl);
 }
-vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf4_tum(mask, maskedoff, src, vl);
+vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf4_tum(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf2_tum(mask, maskedoff, src, vl);
+vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf2_tum(vm, vd, vs2, vl);
 }
-vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m1_tum(mask, maskedoff, src, vl);
+vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m1_tum(vm, vd, vs2, vl);
 }
-vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m2_tum(mask, maskedoff, src, vl);
+vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m2_tum(vm, vd, vs2, vl);
 }
-vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m4_tum(mask, maskedoff, src, vl);
+vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m4_tum(vm, vd, vs2, vl);
 }
-vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16m8_tum(mask, maskedoff, src, vl);
+vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16m8_tum(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32mf2_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32mf2_tum(vm, vd, vs2, vl);
 }
-vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m1_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m1_tum(vm, vd, vs2, vl);
 }
-vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m2_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m2_tum(vm, vd, vs2, vl);
 }
-vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m4_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m4_tum(vm, vd, vs2, vl);
 }
-vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i32m8_tum(mask, maskedoff, src, vl);
+vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i32m8_tum(vm, vd, vs2, vl);
 }
-vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m1_tum(mask, maskedoff, src, vl);
+vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m1_tum(vm, vd, vs2, vl);
 }
-vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m2_tum(mask, maskedoff, src, vl);
+vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m2_tum(vm, vd, vs2, vl);
 }
-vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m4_tum(mask, maskedoff, src, vl);
+vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m4_tum(vm, vd, vs2, vl);
 }
-vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i64m8_tum(mask, maskedoff, src, vl);
+vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i64m8_tum(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vwcvt_x_x_v_i16mf4_tumu(mask, maskedoff, src, vl);
+vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_x_v_i16mf4_tumu(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-
return __riscv_vwcvt_x_x_v_i16mf2_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m1_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m2_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m4_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m4_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m8_tumu(mask, maskedoff, src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i32mf2_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i32m1_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i32m2_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i32m4_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i32m8_tumu(mask, maskedoff, src, vl); +vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i64m1_tumu(mask, maskedoff, src, vl); +vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i64m1_tumu(vm, vd, vs2, vl); } 
-vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i64m2_tumu(mask, maskedoff, src, vl); +vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i64m4_tumu(mask, maskedoff, src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i64m8_tumu(mask, maskedoff, src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i64m8_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16mf4_mu(mask, maskedoff, src, vl); +vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16mf2_mu(mask, maskedoff, src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m1_mu(mask, maskedoff, src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m2_mu(mask, maskedoff, src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m4_mu(mask, maskedoff, src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m4_mu(vm, vd, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i16m8_mu(mask, maskedoff, src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i32mf2_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_x_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vwcvt_x_x_v_i32m1_mu(mask, maskedoff, src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { + return 
__riscv_vwcvt_x_x_v_i32m1_mu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
- return __riscv_vwcvt_x_x_v_i32m2_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+ return __riscv_vwcvt_x_x_v_i32m2_mu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
- return __riscv_vwcvt_x_x_v_i32m4_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+ return __riscv_vwcvt_x_x_v_i32m4_mu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
- return __riscv_vwcvt_x_x_v_i32m8_mu(mask, maskedoff, src, vl);
+vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+ return __riscv_vwcvt_x_x_v_i32m8_mu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
- return __riscv_vwcvt_x_x_v_i64m1_mu(mask, maskedoff, src, vl);
+vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+ return __riscv_vwcvt_x_x_v_i64m1_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
- return __riscv_vwcvt_x_x_v_i64m2_mu(mask, maskedoff, src, vl);
+vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+ return __riscv_vwcvt_x_x_v_i64m2_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
- return __riscv_vwcvt_x_x_v_i64m4_mu(mask, maskedoff, src, vl);
+vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+ return __riscv_vwcvt_x_x_v_i64m4_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
- return __riscv_vwcvt_x_x_v_i64m8_mu(mask, maskedoff, src, vl);
+vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+ return __riscv_vwcvt_x_x_v_i64m8_mu(vm, vd, vs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvt\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwcvtu.c b/auto-generated/policy_funcs/gnu-api-tests/vwcvtu.c
index ff4cf5b73..077929557 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwcvtu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwcvtu.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return __riscv_vwcvtu_x_x_v_u16mf4_tu(maskedoff, src, vl);
+vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+ return __riscv_vwcvtu_x_x_v_u16mf4_tu(vd, vs2, vl);
 }

-vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return __riscv_vwcvtu_x_x_v_u16mf2_tu(maskedoff, src, vl);
+vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+ return __riscv_vwcvtu_x_x_v_u16mf2_tu(vd, vs2, vl);
 }

-vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return
__riscv_vwcvtu_x_x_v_u16m1_tu(maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m2_tu(maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m4_tu(maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m8_tu(maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32mf2_tu(maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m1_tu(maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m2_tu(maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m4_tu(maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m8_tu(maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m1_tu(maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m2_tu(maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m4_tu(maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return 
__riscv_vwcvtu_x_x_v_u64m8_tu(maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16mf4_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16mf2_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m1_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m2_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m4_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m8_tum(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32mf2_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m1_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m2_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m4_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return 
__riscv_vwcvtu_x_x_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m8_tum(mask, maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m1_tum(mask, maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m2_tum(mask, maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m4_tum(mask, maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m8_tum(mask, maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16mf4_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16mf2_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m1_tumu(mask, maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m2_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m4_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, 
size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m8_tumu(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32mf2_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m1_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m2_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m4_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m8_tumu(mask, maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m1_tumu(mask, maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m2_tumu(mask, maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m4_tumu(mask, maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u64m8_tumu(mask, maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16mf4_mu(mask, maskedoff, src, vl); +vuint16mf4_t 
test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16mf2_mu(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m1_mu(mask, maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m2_mu(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m4_mu(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u16m8_mu(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32mf2_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m1_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m2_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m4_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_x_v_u32m8_mu(mask, maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_x_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t mask, 
vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
- return __riscv_vwcvtu_x_x_v_u64m1_mu(mask, maskedoff, src, vl);
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+ return __riscv_vwcvtu_x_x_v_u64m1_mu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
- return __riscv_vwcvtu_x_x_v_u64m2_mu(mask, maskedoff, src, vl);
+vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
+ return __riscv_vwcvtu_x_x_v_u64m2_mu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return __riscv_vwcvtu_x_x_v_u64m4_mu(mask, maskedoff, src, vl);
+vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
+ return __riscv_vwcvtu_x_x_v_u64m4_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vwcvtu_x_x_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return __riscv_vwcvtu_x_x_v_u64m8_mu(mask, maskedoff, src, vl);
+vuint64m8_t test_vwcvtu_x_x_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+ return __riscv_vwcvtu_x_x_v_u64m8_mu(vm, vd, vs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvtu\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwmacc.c b/auto-generated/policy_funcs/gnu-api-tests/vwmacc.c
index 57cbe00e5..3ee0e9bca 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwmacc.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwmacc.c
@@ -126,364 +126,364 @@ vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, s
 return __riscv_vwmacc_vx_i64m8_tu(vd, rs1, vs2, vl);
 }

-vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
- return __riscv_vwmacc_vv_i16mf4_tum(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vwmacc_vv_i16mf4_tum(vm, vd, vs1, vs2, vl);
 }

-vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
- return __riscv_vwmacc_vx_i16mf4_tum(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vwmacc_vx_i16mf4_tum(vm, vd, rs1, vs2, vl);
 }

-vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
- return __riscv_vwmacc_vv_i16mf2_tum(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vwmacc_vv_i16mf2_tum(vm, vd, vs1, vs2, vl);
 }

-vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
- return __riscv_vwmacc_vx_i16mf2_tum(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vwmacc_vx_i16mf2_tum(vm, vd, rs1, vs2, vl);
 }

-vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
- return __riscv_vwmacc_vv_i16m1_tum(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+
return __riscv_vwmacc_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m1_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m2_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m2_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m4_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m4_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m8_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m8_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m1_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t mask, 
vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m1_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m2_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m2_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m4_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m4_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m8_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m8_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m1_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m1_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m2_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m2_tum(mask, vd, 
rs1, vs2, vl); +vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m4_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m4_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m8_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m8_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t 
test_vwmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, 
vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return 
__riscv_vwmacc_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m1_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m1_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t 
test_vwmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m2_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m2_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m4_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m4_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i16m8_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i16m8_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m1_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m1_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m2_mu(mask, vd, vs1, 
vs2, vl); +vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m2_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m4_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m4_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i32m8_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i32m8_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m1_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m1_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m2_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m2_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m4_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return 
__riscv_vwmacc_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m4_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_vv_i64m8_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_vx_i64m8_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmacc\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwmaccsu.c b/auto-generated/policy_funcs/gnu-api-tests/vwmaccsu.c index b1d7b9e21..5e8e66b2f 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vwmaccsu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vwmaccsu.c @@ -126,364 +126,364 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2 return __riscv_vwmaccsu_vx_i64m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16mf4_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16mf2_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m1_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, 
vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m1_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m2_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m2_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m4_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m4_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m8_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m8_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32mf2_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m1_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t 
vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m1_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m2_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m2_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m4_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m4_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m8_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m8_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m1_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m1_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m2_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, 
size_t vl) { - return __riscv_vwmaccsu_vx_i64m2_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m4_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m4_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m8_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m8_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16mf4_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16mf2_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m1_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t mask, 
vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m2_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m4_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m8_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32mf2_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m1_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t 
test_vwmaccsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m2_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m4_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m8_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m1_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m2_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vwmaccsu_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m4_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m8_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16mf4_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16mf2_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m1_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, 
size_t vl) { + return __riscv_vwmaccsu_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m1_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m2_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m2_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m4_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m4_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i16m8_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i16m8_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32mf2_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m1_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m1_mu(vm, 
vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m1_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m2_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m2_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m4_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m4_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i32m8_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i32m8_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m1_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m1_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m2_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t 
test_vwmaccsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m2_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m4_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m4_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vv_i64m8_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_vx_i64m8_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccsu\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwmaccu.c b/auto-generated/policy_funcs/gnu-api-tests/vwmaccu.c index 668be8228..bc41c2159 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vwmaccu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vwmaccu.c @@ -126,364 +126,364 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t v return __riscv_vwmaccu_vx_u64m8_tu(vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf4_tum(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf4_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16mf4_tum(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf4_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf2_tum(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf2_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return 
__riscv_vwmaccu_vx_u16mf2_tum(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf2_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m1_tum(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m1_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m1_tum(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m1_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m2_tum(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m2_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m2_tum(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m2_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m4_tum(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m4_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m4_tum(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m4_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m8_tum(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m8_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m8_tum(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m8_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32mf2_tum(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32mf2_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return 
__riscv_vwmaccu_vx_u32mf2_tum(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32mf2_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m1_tum(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m1_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m1_tum(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m1_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m2_tum(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m2_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m2_tum(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m2_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m4_tum(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m4_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m4_tum(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m4_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m8_tum(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m8_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m8_tum(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m8_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m1_tum(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m1_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t 
vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m1_tum(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m1_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m2_tum(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m2_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m2_tum(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m2_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m4_tum(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m4_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m4_tum(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m4_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m8_tum(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m8_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m8_tum(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m8_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf4_tumu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16mf4_tumu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf2_tumu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t 
test_vwmaccu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16mf2_tumu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m1_tumu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m1_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m1_tumu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m1_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m2_tumu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m2_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m2_tumu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m4_tumu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m4_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m4_tumu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m4_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m8_tumu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m8_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m8_tumu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m8_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32mf2_tumu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32mf2_tumu(vm, 
vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32mf2_tumu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m1_tumu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m1_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m1_tumu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m1_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m2_tumu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m2_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m2_tumu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m4_tumu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m4_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m4_tumu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m4_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m8_tumu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m8_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m8_tumu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m8_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m1_tumu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, 
vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m1_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m1_tumu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m2_tumu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m2_tumu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m4_tumu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m4_tumu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m8_tumu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m8_tumu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf4_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16mf4_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16mf2_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t 
test_vwmaccu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16mf2_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m1_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m1_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m2_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m2_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m4_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m4_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m4_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u16m8_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u16m8_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32mf2_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t vm, 
vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32mf2_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m1_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m1_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m2_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m2_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m4_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m4_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u32m8_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u32m8_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m1_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t vm, 
vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m1_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m2_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m2_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m4_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m4_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vv_u64m8_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_vx_u64m8_mu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccu\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwmaccus.c b/auto-generated/policy_funcs/gnu-api-tests/vwmaccus.c index 4c55a5ca2..d97d6d1ef 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vwmaccus.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vwmaccus.c @@ -66,184 +66,184 @@ vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2 return __riscv_vwmaccus_vx_i64m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf4_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t 
test_vwmaccus_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf2_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m1_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m2_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m4_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m8_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32mf2_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m1_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m2_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m4_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m8_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t 
mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m1_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m2_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m4_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m8_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf4_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf2_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m1_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m2_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m4_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m8_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t mask, 
vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32mf2_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m1_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m2_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m4_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m8_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m1_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m2_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m4_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m8_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf4_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t 
test_vwmaccus_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16mf2_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m1_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m2_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m4_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i16m8_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32mf2_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m1_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m2_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m4_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i32m8_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, 
vint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m1_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m2_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m4_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmaccus_vx_i64m8_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmaccus_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccus\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwmul.c b/auto-generated/policy_funcs/gnu-api-tests/vwmul.c index 16e5d925a..f86876f04 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vwmul.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vwmul.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m1_tu(maskedoff, 
op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return 
__riscv_vwmul_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t 
op2, size_t vl) { - return __riscv_vwmul_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m4_tum(mask, 
maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return 
__riscv_vwmul_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return 
__riscv_vwmul_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, 
vint8m2_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t 
mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwmul_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwmul_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwmul_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwmul_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmul\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwmulsu.c b/auto-generated/policy_funcs/gnu-api-tests/vwmulsu.c
index 5186023d1..7ea211e27 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwmulsu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwmulsu.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf4_tu(vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf4_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf2_tu(vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf2_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf2_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m1_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m1_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m1_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m2_tu(vd, vs2, vs1, vl);
 }

-vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m2_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m2_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m4_tu(vd, vs2, vs1, vl);
 }

-vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m4_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m4_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m8_tu(vd, vs2, vs1, vl);
 }

-vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m8_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m8_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32mf2_tu(vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32mf2_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32mf2_tu(vd, vs2, rs1, vl);
 }

-vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m1_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m1_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m1_tu(vd, vs2, rs1, vl);
 }

-vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m2_tu(vd, vs2, vs1, vl);
 }

-vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m2_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m2_tu(vd, vs2, rs1, vl);
 }

-vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m4_tu(vd, vs2, vs1, vl);
 }

-vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m4_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m4_tu(vd, vs2, rs1, vl);
 }

-vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m8_tu(vd, vs2, vs1, vl);
 }

-vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m8_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m8_tu(vd, vs2, rs1, vl);
 }

-vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m1_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m1_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m1_tu(vd, vs2, rs1, vl);
 }

-vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m2_tu(vd, vs2, vs1, vl);
 }

-vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m2_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m2_tu(vd, vs2, rs1, vl);
 }

-vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m4_tu(vd, vs2, vs1, vl);
 }

-vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m4_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m4_tu(vd, vs2, rs1, vl);
 }

-vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m8_tu(vd, vs2, vs1, vl);
 }

-vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m8_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m8_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m1_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m2_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m4_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m8_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m1_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m2_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m4_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m8_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m1_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m2_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m4_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m8_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m1_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m2_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m4_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i16m8_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m1_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m2_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m4_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i32m8_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m1_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m2_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m4_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulsu_vv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulsu_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulsu_vx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulsu_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulsu\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwmulu.c b/auto-generated/policy_funcs/gnu-api-tests/vwmulu.c
index 4898d6211..6fbd5dd6e 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwmulu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwmulu.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m1_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m1_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m1_tu(vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m2_tu(vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m2_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m2_tu(vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m4_tu(vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m4_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m4_tu(vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m8_tu(vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m8_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m8_tu(vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u32mf2_tu(vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u32mf2_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u32m1_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u32m1_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u32m1_tu(vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u32m2_tu(vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u32m2_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u32m2_tu(vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u32m4_tu(vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u32m4_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u32m4_tu(vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u32m8_tu(vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u32m8_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u32m8_tu(vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u64m1_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u64m1_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u64m1_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u64m2_tu(vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u64m2_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u64m2_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u64m4_tu(vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u64m4_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u64m4_tu(vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u64m8_tu(vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u64m8_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u64m8_tu(vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwmulu_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vwmulu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwmulu_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwmulu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t
vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vwmulu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t 
test_vwmulu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t 
test_vwmulu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t mask, 
vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, 
vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - 
return __riscv_vwmulu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t mask, 
vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return 
__riscv_vwmulu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulu\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwredsum.c b/auto-generated/policy_funcs/gnu-api-tests/vwredsum.c index fba8893e9..0572697ff 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vwredsum.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vwredsum.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8mf8_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t vd, vint8mf8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8mf8_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8mf4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t vd, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8mf2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t maskedoff, vint8m1_t vector, 
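/* The renamed tests above all follow the operand-style parameter convention
 * used throughout this patch: vm is the mask operand, vd the destination
 * operand that provides the element values preserved by the undisturbed
 * policies, vs2/vs1 the vector sources, and rs1 the scalar source of the
 * .vx forms. A minimal sketch of the pattern, reusing the
 * __riscv_vwmulu_vx_u32m1_tum intrinsic exercised above (the wrapper name
 * is illustrative, not part of the generated tests):
 *
 *   vuint32m1_t widen_mul_masked(vbool32_t vm, vuint32m1_t vd,
 *                                vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
 *     // vd provides the destination values preserved under the
 *     // tail-undisturbed (_tum) policy
 *     return __riscv_vwmulu_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 *   }
 */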
vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m1_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t vd, vint8m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m2_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t vd, vint8m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m4_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t vd, vint8m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m8_i16m1_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t vd, vint8m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16mf4_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t vd, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16mf4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16mf2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m1_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t vd, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m2_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t vd, vint16m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m4_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t vd, vint16m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m8_i32m1_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t vd, vint16m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t 
test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32mf2_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32mf2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m1_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t vd, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m2_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t vd, vint32m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m4_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t vd, vint32m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m8_i64m1_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t vd, vint32m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m8_i64m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8mf8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint8mf8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8mf8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8mf4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8mf2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m1_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint8m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - 
return __riscv_vwredsum_vs_i8m2_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint8m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m4_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint8m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m8_i16m1_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t vm, vint16m1_t vd, vint8m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i8m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16mf4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16mf4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16mf2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m1_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m2_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint16m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m4_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint16m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m8_i32m1_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t vm, vint32m1_t vd, vint16m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i16m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t 
test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32mf2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32mf2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m1_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m2_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint32m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m4_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint32m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m8_i64m1_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t vm, vint64m1_t vd, vint32m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_vs_i32m8_i64m1_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsum\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwredsumu.c b/auto-generated/policy_funcs/gnu-api-tests/vwredsumu.c index 81e4110df..fb1b07806 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vwredsumu.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vwredsumu.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf8_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t vd, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_vs_u8mf8_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf4_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t vd, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_vs_u8mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf2_u16m1_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t 
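/* The vbool types in the masked (_tum) tests encode the SEW/LMUL ratio of
 * the narrow source operand: vboolN_t with N = SEW / LMUL. Hence the
 * vwredsum tests above pair vint8mf8_t (8 / (1/8) = 64) with vbool64_t and
 * vint8m8_t (8 / 8 = 1) with vbool1_t. A sketch of the correspondence (the
 * wrapper name is illustrative only):
 *
 *   vint16m1_t sum_bytes_masked(vbool1_t vm, vint16m1_t vd,
 *                               vint8m8_t vs2, vint16m1_t vs1, size_t vl) {
 *     // an i8m8 source (SEW=8, LMUL=8) requires the vbool1_t mask type
 *     return __riscv_vwredsum_vs_i8m8_i16m1_tum(vm, vd, vs2, vs1, vl);
 *   }
 */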
vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf2_u16m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m1_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t vd, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m1_u16m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m2_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t vd, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m2_u16m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m4_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t vd, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m4_u16m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m8_u16m1_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t vd, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m8_u16m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf4_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t vd, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf4_u32m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf2_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf2_u32m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m1_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t vd, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m1_u32m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m2_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t vd, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m2_u32m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m4_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t vd, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m4_u32m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m8_u32m1_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t vd, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m8_u32m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32mf2_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32mf2_u64m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m1_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t vd, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m1_u64m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m2_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t vd, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m2_u64m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m4_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t vd, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m4_u64m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m8_u64m1_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t vd, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m8_u64m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf8_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf8_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf4_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf4_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8mf2_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8mf2_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m1_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m1_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m2_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m2_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m4_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m4_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u8m8_u16m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t vm, vuint16m1_t vd, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u8m8_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf4_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf4_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16mf2_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16mf2_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m1_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m1_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m2_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m2_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m4_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m4_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u16m8_u32m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t vm, vuint32m1_t vd, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u16m8_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32mf2_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32mf2_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m1_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m1_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m2_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m2_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m4_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m4_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vwredsumu_vs_u32m8_u64m1_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t vm, vuint64m1_t vd, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vwredsumu_vs_u32m8_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsumu\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwsub.c b/auto-generated/policy_funcs/gnu-api-tests/vwsub.c
index fc5846500..aa2a8a851 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwsub.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwsub.c
@@ -6,964 +6,964 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwsub_vv_i16mf4_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_i16mf4_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_i16mf4_tu(maskedoff,
op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, 
vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, 
vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m2_tu(maskedoff, op1, op2, 
vl); +vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return 
__riscv_vwsub_wv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, 
size_t vl) { - return __riscv_vwsub_wv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) 
{ - return __riscv_vwsub_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return 
__riscv_vwsub_wv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t 
vl) { - return __riscv_vwsub_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) 
{ - return __riscv_vwsub_wv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - 
return __riscv_vwsub_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return 
__riscv_vwsub_wv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t 
test_vwsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t 
test_vwsub_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return 
__riscv_vwsub_wx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t 
vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t 
test_vwsub_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m8_tumu(mask, 
maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return 
__riscv_vwsub_wx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t 
test_vwsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t 
vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, 
int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vwsub_wx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_i64m8_mu(vm, vd, vs2, rs1, vl); 
 }

-vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_i64m8_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vwsubu.c b/auto-generated/policy_funcs/gnu-api-tests/vwsubu.c
index ff340a794..d48bebbe4 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vwsubu.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vwsubu.c
@@ -6,964 +6,964 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwsubu_vv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsubu_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwsubu_vx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwsubu_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwsubu_wv_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsubu_wv_u16mf4_tu(vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwsubu_wx_u16mf4_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwsubu_wx_u16mf4_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwsubu_vv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsubu_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwsubu_vx_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwsubu_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwsubu_wv_u16mf2_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsubu_wv_u16mf2_tu(vd, vs2, vs1, vl);
 }

-vuint16mf2_t
test_vwsubu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m4_t 
test_vwsubu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return 
__riscv_vwsubu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vwsubu_wx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { 
+ return __riscv_vwsubu_wv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t 
test_vwsubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t 
test_vwsubu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + 
return __riscv_vwsubu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return 
__riscv_vwsubu_wv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t 
test_vwsubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, 
vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vwsubu_wx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return 
__riscv_vwsubu_wv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vwsubu_wx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vwsubu_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m1_tumu(vm, 
vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); 
+vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t 
mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m2_mu(vm, 
vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t 
rs1, size_t vl) { + return __riscv_vwsubu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m2_mu(mask, maskedoff, 
op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, 
size_t vl) { - return __riscv_vwsubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t 
test_vwsubu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub[u]?\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vxor.c b/auto-generated/policy_funcs/gnu-api-tests/vxor.c index e3e135245..2972095ce 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vxor.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vxor.c @@ -6,1412 +6,1412 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf8_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf4_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t 
test_vxor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf2_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m1_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m2_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m4_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m8_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf4_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return 
__riscv_vxor_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf2_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m1_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m2_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m4_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m8_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, 
size_t vl) { - return __riscv_vxor_vx_i32mf2_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m1_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m2_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m4_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m8_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m1_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + 
return __riscv_vxor_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m2_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m4_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m8_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf8_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf4_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf2_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t 
op2, size_t vl) { - return __riscv_vxor_vv_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m1_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m2_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m4_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m8_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf4_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf2_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t vd, 
vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m1_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m2_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m4_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m8_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32mf2_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return 
__riscv_vxor_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m1_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m2_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m4_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m8_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m1_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m2_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t 
maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m4_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m8_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf8_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf4_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf2_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, 
vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m1_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m2_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m4_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m8_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf4_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } 
-vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf2_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m1_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m2_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m4_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m8_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t 
test_vxor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32mf2_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m1_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m2_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m4_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m8_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t mask, 
vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m1_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m2_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m4_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m8_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf8_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf4_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf2_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m1_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m2_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m4_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vxor_vx_u8m8_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf4_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf2_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m1_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m2_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t 
maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m4_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m8_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32mf2_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m1_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m2_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t 
test_vxor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m4_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m8_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m1_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m2_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m4_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m8_tum(vm, vd, 
vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m8_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf8_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf4_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf2_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m1_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t 
test_vxor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m2_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m4_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m8_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf4_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf2_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t 
test_vxor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m1_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m2_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m4_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m8_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32mf2_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } 
-vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m1_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m2_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m4_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m8_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m1_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t 
test_vxor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m2_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m4_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m8_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf8_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf2_tumu(vm, vd, 
vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m1_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m2_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m4_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m8_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } 
-vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf4_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m1_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m2_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m4_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, 
vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m8_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32mf2_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m1_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m2_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m4_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_u32m8_tumu(mask, 
maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m8_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m1_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m2_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m4_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m8_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, 
vint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf8_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf4_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8mf2_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m1_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m2_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, 
vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m4_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_vx_i8m8_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf4_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16mf2_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m1_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m2_mu(vm, vd, 
vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m2_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m4_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_vx_i16m8_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32mf2_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m1_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t mask, 
vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m2_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m4_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_vx_i32m8_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m1_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m2_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return 
__riscv_vxor_vx_i64m4_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_vx_i64m8_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf8_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf4_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8mf2_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vxor_vv_u8m1_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m1_mu(mask, maskedoff, op1, op2, 
vl); +vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vxor_vv_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m2_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vxor_vv_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m4_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vxor_vv_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_vx_u8m8_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf4_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16mf2_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t vm, 
vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vxor_vv_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m1_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vxor_vv_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m2_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor_vv_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m4_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor_vv_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_vx_u16m8_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor_vv_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32mf2_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t 
test_vxor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor_vv_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m1_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor_vv_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m2_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor_vv_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m4_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor_vv_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_vx_u32m8_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor_vv_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m1_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t 
test_vxor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor_vv_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m2_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor_vv_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m4_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor_vv_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_vx_u64m8_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vxor\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-api-tests/vzext_vf2.c b/auto-generated/policy_funcs/gnu-api-tests/vzext_vf2.c index f13bad1b0..bbc9527c2 100644 --- a/auto-generated/policy_funcs/gnu-api-tests/vzext_vf2.c +++ b/auto-generated/policy_funcs/gnu-api-tests/vzext_vf2.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2_u16mf4_tu(maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16mf2_tu(maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m1_tu(maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return 
__riscv_vzext_vf2_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m2_tu(maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m4_tu(maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m8_tu(maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32mf2_tu(maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m1_tu(maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m2_tu(maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m4_tu(maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m8_tu(maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m1_tu(maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m2_tu(maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m4_tu(maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m8_tu(maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return 
__riscv_vzext_vf2_u16mf4_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16mf2_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m1_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m2_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m4_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m8_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32mf2_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m1_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m2_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m4_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m8_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t mask, vuint64m1_t 
maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m1_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m2_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m4_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m8_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2_u16mf4_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16mf2_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m1_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m2_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m4_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m8_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32mf2_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32mf2_tumu(vm, vd, vs2, 
vl); } -vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m1_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m2_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m4_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m8_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m1_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m2_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m4_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m8_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2_u16mf4_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16mf2_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m1_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t 
vs2, size_t vl) { + return __riscv_vzext_vf2_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m2_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m4_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u16m8_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32mf2_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m1_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m2_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m4_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_u32m8_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m1_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m2_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2_u64m4_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t 
vl) {
+  return __riscv_vzext_vf2_u64m4_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) {
-  return __riscv_vzext_vf2_u64m8_mu(mask, maskedoff, op1, vl);
+vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vzext_vf2_u64m8_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf2[ivxfswum.]*\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vzext_vf4.c b/auto-generated/policy_funcs/gnu-api-tests/vzext_vf4.c
index 3e069e27c..804c8236c 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vzext_vf4.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vzext_vf4.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32mf2_tu(maskedoff, op1, vl);
+vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u32mf2_tu(vd, vs2, vl);
 }

-vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m1_tu(maskedoff, op1, vl);
+vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u32m1_tu(vd, vs2, vl);
 }

-vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m2_tu(maskedoff, op1, vl);
+vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u32m2_tu(vd, vs2, vl);
 }

-vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m4_tu(maskedoff, op1, vl);
+vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u32m4_tu(vd, vs2, vl);
 }

-vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m8_tu(maskedoff, op1, vl);
+vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u32m8_tu(vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m1_tu(maskedoff, op1, vl);
+vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m1_tu(vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m2_tu(maskedoff, op1, vl);
+vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m2_tu(vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m4_tu(maskedoff, op1, vl);
+vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m4_tu(vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m8_tu(maskedoff, op1, vl);
+vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m8_tu(vd, vs2, vl);
 }

-vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1,
size_t vl) { - return __riscv_vzext_vf4_u32mf2_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t 
test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u64m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) { + return 
__riscv_vzext_vf4_u32m4_mu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u32m8_mu(mask, maskedoff, op1, vl);
+vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u32m8_mu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m1_mu(mask, maskedoff, op1, vl);
+vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m1_mu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m2_mu(mask, maskedoff, op1, vl);
+vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m2_mu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m4_mu(mask, maskedoff, op1, vl);
+vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m4_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) {
-  return __riscv_vzext_vf4_u64m8_mu(mask, maskedoff, op1, vl);
+vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vzext_vf4_u64m8_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf4[ivxfswum.]*\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-api-tests/vzext_vf8.c b/auto-generated/policy_funcs/gnu-api-tests/vzext_vf8.c
index 8acb7c3fb..724456dde 100644
--- a/auto-generated/policy_funcs/gnu-api-tests/vzext_vf8.c
+++ b/auto-generated/policy_funcs/gnu-api-tests/vzext_vf8.c
@@ -6,68 +6,68 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m1_tu(maskedoff, op1, vl);
+vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vzext_vf8_u64m1_tu(vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m2_tu(maskedoff, op1, vl);
+vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vzext_vf8_u64m2_tu(vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m4_tu(maskedoff, op1, vl);
+vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vzext_vf8_u64m4_tu(vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m8_tu(maskedoff, op1, vl);
+vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vzext_vf8_u64m8_tu(vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) {
-  return __riscv_vzext_vf8_u64m1_tum(mask, maskedoff, op1, vl);
+vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_mu(mask, maskedoff, op1, vl); +vuint64m8_t 
test_vzext_vf8_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vzext_vf8_u64m8_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf8[ivxfswum.]*\s+} 16 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vaadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vaadd.c
index 9829c3adc..e62f06739 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vaadd.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vaadd.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return
__riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t vd, 
vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, 
op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t 
op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t 
mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t 
vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - 
return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return 
__riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return 
__riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, 
int32_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t 
maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t 
vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t 
test_vaadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t 
mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, 
vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return 
__riscv_vaadd_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vaadd_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaadd\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vaaddu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vaaddu.c index 8d3b443db..ff7c304ad 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vaaddu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vaaddu.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t 
op1, vuint8m2_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t vd, 
vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t 
test_vaaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); +vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vaaddu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vaaddu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t vm, 
vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return 
-vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }

-vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vaaddu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vaaddu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vaaddu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vadc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vadc.c
index db069b414..ee4965b5d 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vadc.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vadc.c
@@ -6,356 +6,356 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl) {
-  return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
carryin, vl); +vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t 
v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint32m2_t 
test_vadc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, rs1, v0, vl); } -vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl) { - return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl); +vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vadc_tu(vd, vs2, vs1, v0, vl); } -vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl) { - 
return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) {
+ return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }
-vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl) {
- return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+ return __riscv_vadc_tu(vd, vs2, vs1, v0, vl);
 }
-vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl) {
- return __riscv_vadc_tu(maskedoff, op1, op2, carryin, vl);
+vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+ return __riscv_vadc_tu(vd, vs2, rs1, v0, vl);
 }
 /* { dg-final { scan-assembler-times {vadc\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vadd.c
index 523ea8943..d2a55b35c 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vadd.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vadd.c
@@ -6,1412 +6,1412 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vadd_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vadd_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vadd_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vadd_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vadd_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vadd_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vadd_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vadd_tu(maskedoff, op1, op2, vl);
+vint8m1_t
test_vadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, 
rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t maskedoff, 
vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, 
op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t 
vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m8_t 
test_vadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, 
op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, 
rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return 
__riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, 
vl); +vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t mask, 
vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, 
vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, 
maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, 
size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t 
maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t 
test_vadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, 
size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + 
return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - 
return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return 
__riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { 
- return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t 
vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); 
} -vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, 
size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vadd_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vadd_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } 
-vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, 
vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, 
vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); 
+vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vadd_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, 
vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vadd_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vadd_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vadd\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vand.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vand.c
index 6541663dc..23134d161 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vand.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vand.c
@@ -6,1412 +6,1412 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tu(vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vand_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vand_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vand_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vand_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vand_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vand_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vand_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vand_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vand_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vand_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vand_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vand_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vand_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vand_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vand_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vand_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vand_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vand_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vand_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vand_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vand_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vand_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vand_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vand_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vand_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vand_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vand_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vand_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vand_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vand_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vand_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vand_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vand_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vand_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vand_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vand_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vand_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vand_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vand_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vand_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vand_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vand_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vand_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vand_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vand_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vand_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vand_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vand_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vand_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vand_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vand_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vand_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vand_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vand_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vand_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vand_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vand_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vand_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vand_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vand_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vand_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vand_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vand_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vand_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, vs1, vl);
 }

-vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vand_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vand_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vand_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vand_tumu(mask,
maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, 
vs1, vl); } -vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); 
+vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, 
rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, 
maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, 
vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } 
-vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vand_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vand_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, 
op1, op2, vl); +vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vand_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vand_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vand_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vand_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t 
test_vand_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vand_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vand_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vand_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t 
op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vand_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vand_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vand_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vand_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vand_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t 
test_vand_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vand_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vand_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vand_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vand_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, 
vuint8mf4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t 
test_vand_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t 
test_vand_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vand_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vand_mu(mask, maskedoff, op1, op2, vl); 
+vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return __riscv_vand_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+ return __riscv_vand_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vand\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vasub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vasub.c
index 903d296ff..83824e930 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vasub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vasub.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t vd,
vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t maskedoff, 
vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t vm, vint8m2_t 
vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, 
size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t 
vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return 
__riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return 
__riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return 
__riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t 
op1, int32_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t vm, 
vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vasub_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, 
vint8m1_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t 
test_vasub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t 
vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vasub_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vasub_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasub\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vasubu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vasubu.c
index a5c447e5b..0fb60acf0 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vasubu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vasubu.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t vm,
vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } 
-vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vasubu_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vasubu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vasubu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vcompress.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vcompress.c
index d25a2d533..5bd8eac88 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vcompress.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vcompress.c
@@ -6,240 +6,240 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vbool1_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vbool1_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vbool2_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vbool4_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vbool64_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vbool32_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vbool16_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) {
-  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vbool8_t vs1, size_t vl) {
+  return __riscv_vcompress_tu(vd, vs2, vs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcompress\.[ivxfswum.]+\s+} 59 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vdiv.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vdiv.c
index 66a6b2631..32e5ffee1 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vdiv.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vdiv.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, rs1, vl);
 }
-vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, vs1, vl);
 }
-vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, rs1, vl);
 }
-vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vdiv_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vdiv_tu(vd, vs2, vs1, vl);
 }
-vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t
maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return 
__riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint32m8_t 
test_vdiv_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - 
return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, 
size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return 
__riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); 
+vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t 
maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, 
vl); } -vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, 
op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } 
-vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t 
test_vdiv_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t 
test_vdiv_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - 
return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t 
mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vdiv_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vdiv_mu(vm, vd, vs2, 
rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vdiv\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vdivu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vdivu.c
index c639ef750..ae7aa3264 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vdivu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vdivu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return
__riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t 
maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, 
maskedoff, op1, op2, vl); +vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, 
vl); } -vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t 
op1, vuint16m4_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); 
+vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { 
+  return __riscv_vdivu_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vdivu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vdivu_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vdivu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfabs.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfabs.c
index 694d5d74a..c08cb8ff1 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfabs.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfabs.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfabs_tu(maskedoff, op1, vl);
+vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tu(vd, vs2, vl);
 }
 
-vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfabs_tum(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tum(vm, vd, vs2, vl);
 }
 
-vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfabs_tumu(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfabs_tumu(vm, vd, vs2, vl);
 }
 
-vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
-vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfabs_mu(mask, maskedoff, op1, vl);
+vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfabs_mu(vm, vd, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfabs\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfadd.c
index a2f3d8b12..20adab80a 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfadd.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfadd.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t
test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl); } 
-vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t
test_vfadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfadd_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfadd_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfadd\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfclass.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfclass.c index bea38d170..097f4028f 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfclass.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfclass.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, 
vl); } -vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_tu(maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass_tu(vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { 
+ return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t mask, vuint64m4_t 
maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return 
__riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, 
vfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfclass_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfclass\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt.c index b5e4ce258..c1755a205 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt.c @@ -6,1924 +6,1924 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint16mf4_t 
test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, 
src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return 
__riscv_vfcvt_x_tu(maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t 
vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_tu(maskedoff, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tu(vd, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tu(maskedoff, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tu(vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint64m1_t src, 
size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_tu(maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tu(vd, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint16m4_t 
test_vfcvt_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t 
vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + 
return __riscv_vfcvt_x_tum(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tumu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tumu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tumu(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tu(vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tu(vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tu(vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tu(vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tu(vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tu(vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tu(vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tu(vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tu(vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tu(vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tu(vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tu(vfloat32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tu(vfloat32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tu(vfloat32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tu(vfloat32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tu(vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tu(vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tu(vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tu(vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tu(vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tu(vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tu(vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tu(vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tu(vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tu(vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tu(vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tu(vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_f_tum(mask,
maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return 
__riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, 
vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tum(vbool32_t mask, vuint64m2_t 
maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t 
test_vfcvt_f_xu_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t 
test_vfcvt_f_xu_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return 
__riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfcvt_f_x_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return 
__riscv_vfcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m2_t 
test_vfcvt_f_xu_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t 
test_vfcvt_xu_f_v_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t 
test_vfcvt_f_x_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t 
test_vfcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t 
test_vfcvt_f_x_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t 
+vint64m8_t test_vfcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfcvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.[ivxfswum.]+\s+} 480 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt_rtz.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt_rtz.c
index 3bade41d3..16c06bf2e 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt_rtz.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfcvt_rtz.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tu(vd, vs2, vl);
 }

-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tu(vd, vs2, vl);
 }

-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tum(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }

-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }

-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl);
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_x_mu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfcvt\.rtz[ivxfswum.]*\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfdiv.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfdiv.c
index a869625d9..0a40d19fa 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfdiv.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfdiv.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfdiv_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfdiv_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfdiv_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfdiv_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfdiv_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfdiv_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfdiv_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfdiv_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfdiv_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfdiv_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfdiv_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfdiv_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
__RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t 
test_vfdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vv_f32m8_rm_mu(vbool4_t vm, 
vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t 
vl) { + return __riscv_vfdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfdiv\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmacc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmacc.c index cd1d86fee..41a55398e 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmacc.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmacc.c @@ -126,364 +126,364 @@ vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmacc_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, 
size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t 
vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, vl); } 
-vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t mask, 
vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return 
__riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t 
test_vfmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, 
vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return 
__riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -606,364 +606,364 @@ vfloat64m8_t test_vfmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64 return __riscv_vfmacc_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t 
test_vfmacc_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, 
vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, 
vl); +vfloat32m4_t test_vfmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, 
size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, 
vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t 
test_vfmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, 
vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, 
vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, 
size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_mu(vbool64_t mask, 
vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmacc\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmadd.c index 1ba8af857..87da202f1 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmadd.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmadd.c @@ -126,364 +126,364 @@ vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_ return __riscv_vfmadd_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } 
-vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, 
size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t 
vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, 
rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t mask, 
vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return 
__riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t 
test_vfmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); 
} -vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, 
vs2, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -606,364 +606,364 @@ vfloat64m8_t test_vfmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64 return __riscv_vfmadd_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, 
vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, 
vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t 
rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t 
test_vfmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, 
vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return 
__riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t 
test_vfmadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return 
__riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, 
vfloat64m2_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfmadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
- return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfmadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
- return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfmadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
- return __riscv_vfmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfmadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
- return __riscv_vfmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+ return __riscv_vfmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmadd\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmax.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmax.c
index edda43b8f..70437951d 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmax.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmax.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
- return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfmax_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
- return __riscv_vfmax_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+ return __riscv_vfmax_tu(vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return
__riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t 
vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, 
size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t 
test_vfmax_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t 
test_vfmax_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t 
test_vfmax_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, 
maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - 
return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); 
+vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, 
vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmax_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmax_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmax\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmerge.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmerge.c
index d643b9adf..f08c3052a 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmerge.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmerge.c
@@ -6,64 +6,64 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff,
vfloat32mf2_t op1, float32_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vfmerge_tu(vd, vs2, rs1, v0, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmerge\.[ivxfswum.]+\s+} 15 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmin.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmin.c
index fdef1ae2f..d8eef12a2 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmin.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmin.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmin_tu(vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vfmin_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+
return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t 
op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, 
vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, 
float32_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { 
-  return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_tum(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t
op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, 
vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t mask, 
vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmin_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t 
test_vfmin_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t 
maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return 
__riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vfmin_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfmin_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmin\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsac.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsac.c
index d56f035f9..eacd96e2f 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsac.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsac.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_
   return __riscv_vfmsac_tu(vd, rs1, vs2, vl);
 }

-vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+
return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t mask, 
vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, 
vl); +vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t 
vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, 
vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, 
float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, 
rs1, vs2, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return 
__riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return 
__riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, rs1, vs2, vl);
 }
 
 vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64
   return __riscv_vfmsac_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t 
vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t 
test_vfmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t 
vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, 
vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t 
test_vfmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, 
vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat16m4_t test_vfmsac_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return 
__riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, 
vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsac\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsub.c
index ce25dbe3f..ccee14fee 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmsub.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8_
   return __riscv_vfmsub_tu(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return
__riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, 
float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t 
test_vfmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t 
rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, 
vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, 
vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); 
+vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, 
vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, 
vs2, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -606,364 +606,364 @@ vfloat64m8_t test_vfmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat64 return __riscv_vfmsub_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t 
vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, 
vl); +vfloat32m1_t test_vfmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, 
size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t 
test_vfmsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t 
vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_mu(mask, 
vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfmsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfmsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfmsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfmsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfmsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfmsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfmsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfmsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfmsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfmsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfmsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfmsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfmsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfmsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmsub\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmul.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmul.c
index 9d59fa73e..9adbe9342 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmul.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmul.c
@@ -6,964 +6,964 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfmul_tu(vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return
__riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, 
size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); 
+vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t 
test_vfmul_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t 
test_vfmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t 
test_vfmul_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, 
op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t 
vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, 
op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t 
test_vfmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tu(vfloat16mf4_t vd, 
vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { 
+ return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + 
return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return 
__riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return 
__riscv_vfmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t 
vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vv_f32m2_rm_tumu(vbool16_t vm, 
vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t 
test_vfmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); 
+vfloat16m1_t test_vfmul_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfmul_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfmul_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfmul_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfmul_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_mu(vbool64_t 
vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, 
size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfmul\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmv.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmv.c index 83411f158..1807c469f 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfmv.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfmv.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t vd, float16_t rs1, size_t 
vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat16m1_t test_vfmv_v_f_f16m1_tu(vfloat16m1_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat16m2_t test_vfmv_v_f_f16m2_tu(vfloat16m2_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat16m4_t test_vfmv_v_f_f16m4_tu(vfloat16m4_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat16m8_t test_vfmv_v_f_f16m8_tu(vfloat16m8_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat32m1_t test_vfmv_v_f_f32m1_tu(vfloat32m1_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat32m2_t test_vfmv_v_f_f32m2_tu(vfloat32m2_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat32m4_t test_vfmv_v_f_f32m4_tu(vfloat32m4_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat32m8_t test_vfmv_v_f_f32m8_tu(vfloat32m8_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t maskedoff, float64_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat64m1_t test_vfmv_v_f_f64m1_tu(vfloat64m1_t vd, float64_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t maskedoff, float64_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat64m2_t test_vfmv_v_f_f64m2_tu(vfloat64m2_t vd, float64_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t maskedoff, float64_t src, size_t vl) { - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat64m4_t test_vfmv_v_f_f64m4_tu(vfloat64m4_t vd, float64_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t maskedoff, float64_t src, size_t vl) 
{ - return __riscv_vfmv_v_tu(maskedoff, src, vl); +vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t vd, float64_t rs1, size_t vl) { + return __riscv_vfmv_v_tu(vd, rs1, vl); } -vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat16m1_t test_vfmv_s_f_f16m1_tu(vfloat16m1_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat16m2_t test_vfmv_s_f_f16m2_tu(vfloat16m2_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat16m4_t test_vfmv_s_f_f16m4_tu(vfloat16m4_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t maskedoff, float16_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat16m8_t test_vfmv_s_f_f16m8_tu(vfloat16m8_t vd, float16_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat32m1_t test_vfmv_s_f_f32m1_tu(vfloat32m1_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat32m2_t test_vfmv_s_f_f32m2_tu(vfloat32m2_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat32m4_t test_vfmv_s_f_f32m4_tu(vfloat32m4_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t maskedoff, float32_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat32m8_t test_vfmv_s_f_f32m8_tu(vfloat32m8_t vd, float32_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t maskedoff, float64_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat64m1_t test_vfmv_s_f_f64m1_tu(vfloat64m1_t vd, float64_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t maskedoff, float64_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat64m2_t test_vfmv_s_f_f64m2_tu(vfloat64m2_t vd, float64_t rs1, size_t vl) 
{ + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t maskedoff, float64_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat64m4_t test_vfmv_s_f_f64m4_tu(vfloat64m4_t vd, float64_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } -vfloat64m8_t test_vfmv_s_f_f64m8_tu(vfloat64m8_t maskedoff, float64_t src, size_t vl) { - return __riscv_vfmv_s_tu(maskedoff, src, vl); +vfloat64m8_t test_vfmv_s_f_f64m8_tu(vfloat64m8_t vd, float64_t rs1, size_t vl) { + return __riscv_vfmv_s_tu(vd, rs1, vl); } /* { dg-final { scan-assembler-times {vfmv\.[ivxfswum.]+\s+[,\sa-x0-9()]+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt.c index e47d0ab04..25050dffd 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt.c @@ -6,1828 +6,1828 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint8m1_t 
test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + 
return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return 
__riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m2_t 
test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tu(vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return 
__riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint16mf2_t 
test_vfncvt_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m1_t 
test_vfncvt_f_x_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, 
vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); 
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tum(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, 
vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint8m4_t 
test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, 
maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, 
vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t 
mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
 }
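Every hunk in this file applies the same mechanical rename to the vfncvt tests: the mask operand becomes vm, the merge/passthrough operand becomes vd, and the wide source becomes vs2, matching the operand names used by the underlying instruction. A minimal usage sketch of one of the overloaded policy intrinsics exercised here follows; the wrapper name narrow_masked and the __riscv_vsetvl_e64m2 AVL handling are illustrative assumptions, not part of this patch.

#include <riscv_vector.h>

// Sketch: narrow a masked f64m2 vector down to f32m1 under the tumu
// policy, where inactive and tail lanes keep their old values from vd.
// narrow_masked and the parameter n are hypothetical names.
vfloat32m1_t narrow_masked(vbool32_t vm, vfloat32m1_t vd,
                           vfloat64m2_t vs2, size_t n) {
  size_t vl = __riscv_vsetvl_e64m2(n);  // illustrative AVL -> vl step
  // Same overloaded call shape as test_vfncvt_f_f_w_f32m1_tumu above:
  // vm selects active lanes, vd supplies inactive/tail lanes, and vs2
  // is the wide source being narrowed.
  return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl);
}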
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_tumu(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, 
vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, 
maskedoff, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m4_t 
test_vfncvt_f_xu_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t vm, 
vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, 
size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, vl);
 }
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, vl);
 }
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf2_t
test_vfncvt_xu_f_w_u8mf2_rm_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tu(vuint16m1_t maskedoff, 
vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tu(vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tu(vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tu(vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tu(vfloat16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tu(vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tu(vfloat16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tu(vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tu(vfloat16m4_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tu(vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tu(vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tu(vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tu(vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tu(vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tu(vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tu(vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tu(vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tu(vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tu(vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tu(vfloat32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tu(vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tu(vfloat32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tu(vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tu(vfloat32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tu(vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tu(vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tu(vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tu(vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tu(vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tu(vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tu(vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m1_t test_vfncvt_x_f_w_i8m1_rm_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m1_t test_vfncvt_x_f_w_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m2_t test_vfncvt_x_f_w_i8m2_rm_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m2_t test_vfncvt_x_f_w_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint8m4_t test_vfncvt_x_f_w_i8m4_rm_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint8m4_t test_vfncvt_x_f_w_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m1_t test_vfncvt_x_f_w_i16m1_rm_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m1_t test_vfncvt_x_f_w_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m2_t test_vfncvt_x_f_w_i16m2_rm_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m2_t test_vfncvt_x_f_w_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint16m4_t test_vfncvt_x_f_w_i16m4_rm_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint16m4_t test_vfncvt_x_f_w_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m1_t test_vfncvt_x_f_w_i32m1_rm_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m2_t test_vfncvt_x_f_w_i32m2_rm_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vint32m4_t test_vfncvt_x_f_w_i32m4_rm_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_mu(mask, maskedoff, src,
__RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.[ivxfswum.]+\s+} 456 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rod.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rod.c index 9b4d6be61..22fab80fe 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rod.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rod.c @@ -6,148 
+6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, 
vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat16m2_t 
test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, 
vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rod[ivxfswum.]*\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rtz.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rtz.c index e55205286..4e1dd8144 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rtz.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfncvt_rtz.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t vd, 
vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + 
return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return 
__riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint8m2_t 
test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint16m4_t 
test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint8mf4_t 
test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vint16mf4_t 
test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return 
__riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint16mf2_t 
test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t 
maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfncvt\.rtz[ivxfswum.]*\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfneg.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfneg.c index 96ba660d3..36d90701c 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfneg.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfneg.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat16m4_t 
test_vfneg_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg_tu(maskedoff, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg_tu(vd, vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t 
mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, 
vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg_tum(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg_tum(vm, vd, vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t mask, 
vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg_tumu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg_tumu(vm, vd, vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, 
vfloat32mf2_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg_mu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { + return __riscv_vfneg_mu(vm, vd, vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfneg\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmacc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmacc.c index a36d71552..715884e06 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmacc.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmacc.c @@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t 
vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); 
+vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, 
float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, 
vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t 
test_vfnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t mask, 
vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, 
vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t 
vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, vl); } vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -606,364 +606,364 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6 return __riscv_vfnmacc_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t 
test_vfnmacc_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, 
float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return 
__riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t 
test_vfnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t 
test_vfnmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t 
test_vfnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tumu(vbool16_t vm, 
vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, 
size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t 
test_vfnmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return 
__riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmacc_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmacc\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmadd.c
index 27ab5deb6..0be76e582 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmadd.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmadd.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8
   return __riscv_vfnmadd_tu(vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd,
vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return 
__riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t 
test_vfnmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t vm, 
vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, 
vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return 
__riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); 
} -vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t 
vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, 
float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
 vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6
   return __riscv_vfnmadd_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tum(vbool4_t 
mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - 
return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return 
__riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return 
__riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t 
test_vfnmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t 
vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t 
test_vfnmadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmadd\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsac.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsac.c index 1733c2cdc..f4959ffe7 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsac.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsac.c @@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8 return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, 
vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t mask, 
vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return 
__riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, 
vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t 
test_vfnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t vm, 
vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, 
vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t 
test_vfnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return 
__riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl);
 }
-vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, vl);
 }
-vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, vl);
 }
 vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6
   return __riscv_vfnmsac_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
- return
__riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tum(vbool4_t vm, 
vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t 
vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return 
__riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t 
test_vfnmsac_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, 
size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t 
test_vfnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfnmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfnmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfnmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfnmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsac_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsac\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsub.c
index b2906ef7e..a0007b650 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfnmsub.c
@@ -126,364 +126,364 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, float64_t rs1, vfloat64m8
   return __riscv_vfnmsub_tu(vd, rs1, vs2, vl);
 }
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl);
 }
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, 
float32_t rs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, 
vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t 
test_vfnmsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl);
+vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl);
+vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
 vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -606,364 +606,364 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, float64_t rs1, vfloat6
   return __riscv_vfnmsub_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m1_t test_vfnmsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfnmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m2_t test_vfnmsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfnmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m4_t test_vfnmsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfnmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat16m8_t test_vfnmsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfnmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m1_t test_vfnmsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfnmsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfnmsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfnmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfnmsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfnmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfnmsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfnmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfnmsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfnmsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfnmsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfnmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfnmsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfnmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfnmsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
-  return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfnmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfnmsub\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrdiv.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrdiv.c
index 5212bbd93..cf738ac90 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrdiv.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrdiv.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t mask,
vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, 
float32_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t 
maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t 
maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tumu(vbool4_t 
mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfrdiv_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrdiv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrdiv_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t 
test_vfrdiv_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m2_t test_vfrdiv_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m4_t test_vfrdiv_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat32m8_t test_vfrdiv_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m1_t test_vfrdiv_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m2_t test_vfrdiv_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m4_t test_vfrdiv_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
-vfloat64m8_t test_vfrdiv_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vfrdiv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrdiv_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrdiv\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrec7.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrec7.c
index f0ddc4512..6fe3544cf 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrec7.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrec7.c
@@ -6,484 +6,484 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, vl);
 }
 
-vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t
vl) { + return __riscv_vfrec7_tu(vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, vl); +vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, 
maskedoff, op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrec7_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, 
vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrec7_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, 
vl); } -vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); 
+vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrec7_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl); 
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tu(maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tu(vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfrec7_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfrec7_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrec7\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmax.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmax.c
index fc59b0119..545bc89ef 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmax.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmax.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmax_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmax_tum(vm, vd, vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmax\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmin.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmin.c
index fcb8bc01d..5386c761e 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmin.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredmin.c
@@ -6,124 +6,124 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredmin_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredmin_tum(vm, vd, vs2, vs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredmin\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredosum.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredosum.c
index a04aaadf6..ed5f24b2c 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredosum.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredosum.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredosum\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredusum.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredusum.c
index f7483af84..890eca4af 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfredusum.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfredusum.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
 }

-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl);
} -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); 
} -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t 
test_vfredusum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tum(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tum(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tum(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tum(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tum(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, 
__RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t 
test_vfredusum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfredusum\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsqrt7.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsqrt7.c index 05d96eeca..42b574efb 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsqrt7.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsqrt7.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); 
+vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, 
maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, 
vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, 
maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat32m2_t 
test_vfrsqrt7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfrsqrt7_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsqrt7\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsub.c index 0c3e90d11..80a7187f5 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsub.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfrsub.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat16m2_t 
test_vfrsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, 
op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); 
+vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, 
op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return 
__riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, 
maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t 
test_vfrsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm_tu(vfloat64m8_t vd, 
vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t 
vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm_tumu(vbool16_t vm, 
vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t 
test_vfrsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t 
test_vfrsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_rm_mu(vbool8_t vm, 
vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfrsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfrsub\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnj.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnj.c
index ca9bb0dee..a88bbb95d 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnj.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnj.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, rs1, vl);
 }
 
-vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfsgnj_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t
test_vfsgnj_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { 
- return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, 
maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - 
return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, 
float32_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t 
maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } 
-vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, 
size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t vm, 
vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t vm, 
vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t 
vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnj_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnj\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjn.c 
b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjn.c index 493432d74..43fd2898a 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjn.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjn.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat16m8_t 
test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t 
maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t 
test_vfsgnjn_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + 
return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t vm, 
vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, 
maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t mask, 
vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + 
return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t 
test_vfsgnjn_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, 
maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return 
__riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjn_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjn\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjx.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjx.c index 9a145b739..c7d3eb361 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjx.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsgnjx.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, 
op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m8_t 
test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); 
+vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t 
vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t 
mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, 
vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t 
test_vfsgnjx_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, 
vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_tumu(vm, vd, vs2, rs1, vl); } 
-vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t 
rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, 
float64_t rs1, size_t vl) { + return __riscv_vfsgnjx_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsgnjx\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1down.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1down.c index 1da9e16da..b68f0650a 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1down.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1down.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, 
vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, 
vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, 
size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } 
-vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, 
vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat32m8_t 
test_vfslide1down_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1down_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1down\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1up.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1up.c index 4b33385ac..802ade770 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1up.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfslide1up.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return 
__riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfslide1up_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) { - return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +vfloat64m8_t 
+vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tu(vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl);
+vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl);
+vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, float16_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float32_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, float64_t value, size_t vl) {
-  return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl);
+vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vfslide1up_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfslide1up\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsqrt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsqrt.c
index af6fee4f9..76d1d40f7 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsqrt.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsqrt.c
@@ -6,484 +6,484 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return __riscv_vfsqrt_tu(maskedoff, op1, vl);
+vfloat16mf4_t
test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t vd, 
vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, 
vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); 
+vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t 
vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t 
vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) 
{ + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_tu(maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return 
__riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_tum(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return 
__riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - 
return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_tumu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { + return __riscv_vfsqrt_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { + return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return __riscv_vfsqrt_mu(mask, 
maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
-  return __riscv_vfsqrt_mu(mask, maskedoff, op1, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) {
+  return __riscv_vfsqrt_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsqrt\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsub.c
index 6e3da2592..fca50df71 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfsub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfsub.c
@@ -6,964 +6,964 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t
test_vfsub_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, 
size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t vm, 
vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, 
vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t 
rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, 
vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t 
vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t 
test_vfsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, 
vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t vm, 
vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + 
return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, 
__RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t 
test_vfsub_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t 
op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) 
{ + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return 
__riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return 
__riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, 
float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_tumu(vbool8_t vm, 
vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t 
test_vfsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t 
test_vfsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfsub_vf_f32m1_rm_mu(vbool32_t vm, 
vfloat32m1_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t 
vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vfsub_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vfsub_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfsub\.[ivxfswum.]+\s+} 240 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwadd.c index e3d755e60..600f0c78c 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwadd.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwadd.c @@ -6,1156 +6,1156 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } 
-vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat32m4_t 
test_vfwadd_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat64m2_t 
test_vfwadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t mask, 
vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t 
vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t 
vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return 
__riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t 
test_vfwadd_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t vm, 
vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwadd_wf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwadd_wf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwadd_wf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwadd_wf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwadd_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwadd_wf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwadd_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwadd_wf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwadd_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwadd_wf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwadd_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwadd_wf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwadd_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwadd_wv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwadd_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t
test_vfwadd_wf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwadd_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, 
__RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwadd\.[ivxfswum.]+\s+} 288 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt.c index 6b732c3e2..c17b186a3 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt.c @@ -6,1204 +6,1204 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_tu(maskedoff, src, vl); +vfloat16m4_t 
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
+vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tu(vd, vs2, vl);
 }

-vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tu(vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tu(vd, vs2, vl);
 }

-vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tum(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tum(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_x_tumu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
-  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl);
 }

-vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src,
size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, 
vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32mf2_t 
test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, 
vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return 
__riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t 
test_vfwcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, 
vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return 
__riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tumu(vbool8_t 
vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_tumu(mask, 
maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, 
size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl); +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t mask, vuint64m2_t maskedoff, 
-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.[ivxfswum.]+\s+} 300 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt_rtz.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt_rtz.c
index c4ef8c7a2..d3f39ef10 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt_rtz.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwcvt_rtz.c
@@ -6,292 +6,292 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tu(vd, vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tu(vd, vs2, vl);
 }
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tum(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl);
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tum(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl);
 }
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_rtz_x_tumu(vm, vd, vs2, vl);
 }
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src,
size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); 
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwcvt_rtz_xu_mu(vm, vd, vs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwcvt\.rtz[ivxfswum.]*\s+} 72 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmacc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmacc.c
index 5d0613aa7..407d1dda1 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmacc.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmacc.c
@@ -78,220 +78,220 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m4
 return __riscv_vfwmacc_tu(vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
- return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl);
} -vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, 
vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, 
vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return 
__riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, 
vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat3 return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t 
vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, 
vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t 
test_vfwmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_mu(vbool16_t mask, 
vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_mu(mask, vd, vs1, 
vs2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
- return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
- return __riscv_vfwmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmacc\.[ivxfswum.]+\s+} 144 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmsac.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmsac.c
index cf1ae8018..fc03bdad3 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmsac.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmsac.c
@@ -78,220 +78,220 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m4
 return __riscv_vfwmsac_tu(vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
- return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl);
 }
-vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
- return
__riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t 
test_vfwmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t vm, 
vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t 
vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t 
test_vfwmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ 
vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat3 return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, 
vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, 
vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t 
test_vfwmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t 
vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmsac\.[ivxfswum.]+\s+} 144 } } */ diff --git 
a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmul.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmul.c index a84d51cb5..1eccfa6f7 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmul.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwmul.c @@ -6,580 +6,580 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat64m1_t 
test_vfwmul_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return 
__riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t 
vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t vm, 
vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); 
+vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, 
maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); 
+vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); 
+vfloat32m8_t test_vfwmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, 
__RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return 
__riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t 
op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfwmul_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, 
vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return 
__riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwmul_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return 
__riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwmul\.[ivxfswum.]+\s+} 144 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmacc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmacc.c
index 206de255c..30bb85b39 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmacc.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmacc.c
@@ -78,220 +78,220 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m
   return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
-  return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl);
+vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl);
 }

-vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t mask,
vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, 
vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, 
vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, 
vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return 
__riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - 
return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, 
vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, 
vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, 
vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, 
vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t 
test_vfwnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmacc\.[ivxfswum.]+\s+} 144 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmsac.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmsac.c index fb00bcf8c..cccdf5932 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmsac.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwnmsac.c @@ -78,220 +78,220 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float32_t vs1, vfloat32m return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t 
vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, 
vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, 
vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, 
vs2, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t 
test_vfwnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t 
vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); } vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { @@ -366,220 +366,220 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float32_t vs1, vfloat return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, 
vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - 
return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tum(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, 
size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, 
size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_tumu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { - return 
__riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_mu(mask, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwnmsac\.[ivxfswum.]+\s+} 144 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredosum.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredosum.c index db6a374f7..93190ef8a 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredosum.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredosum.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t 
vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t 
test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, 
vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t 
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredosum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredosum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredosum\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredusum.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredusum.c
index e3e69c593..d0aa03c54 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredusum.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwredusum.c
@@ -6,180 +6,180 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tu(maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tum(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tum(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tum(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tum(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tum(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tum(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tum(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tum(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tum(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return __riscv_vfwredusum_tum(mask, maskedoff, vector, scalar, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vfwredusum_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwredusum\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwsub.c
index 7b235506d..7a4b2b0d9 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vfwsub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vfwsub.c
@@ -6,1156 +6,1156 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
test_vfwsub_wv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) { - return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, 
-vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_wv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_wf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_wv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_wf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_wv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_wf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_wv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_wf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_wv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_wf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_wf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_wv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfwsub_wf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m8_t test_vfwsub_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m1_t test_vfwsub_wf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m1_t test_vfwsub_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m2_t test_vfwsub_wf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m2_t test_vfwsub_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m4_t test_vfwsub_wf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m4_t test_vfwsub_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat32m8_t test_vfwsub_wf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat32m8_t test_vfwsub_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m1_t test_vfwsub_wf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m1_t test_vfwsub_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m2_t test_vfwsub_wf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m2_t test_vfwsub_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
 }
-vfloat64m4_t test_vfwsub_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+vfloat64m4_t test_vfwsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return
__riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_vf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float32_t op2, size_t vl) { - return __riscv_vfwsub_wf_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfwsub\.[ivxfswum.]+\s+} 288 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vid.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vid.c index 59f0bd4d2..a48931ec8 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vid.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vid.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_tu(vuint8mf8_t vd, size_t vl) { + 
return __riscv_vid_tu(vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_tu(vuint8mf4_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_tu(vuint8mf2_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint8m1_t test_vid_v_u8m1_tu(vuint8m1_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_tu(vuint8m1_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_tu(vuint8m2_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_tu(vuint8m4_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_tu(vuint8m8_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_tu(vuint16mf4_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_tu(vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_tu(vuint16mf2_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_tu(vuint16m1_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_tu(vuint16m2_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_tu(vuint16m4_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_tu(vuint16m8_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_tu(vuint32m1_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_tu(vuint32m2_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_tu(vuint32m4_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_tu(vuint32m8_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint64m1_t 
test_vid_v_u64m1_tu(vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_tu(vuint64m1_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint64m2_t test_vid_v_u64m2_tu(vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_tu(vuint64m2_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_tu(vuint64m4_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_tu(maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_tu(vuint64m8_t vd, size_t vl) { + return __riscv_vid_tu(vd, vl); } -vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint8m1_t test_vid_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint8m2_t test_vid_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint8m4_t test_vid_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint8m8_t test_vid_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint16m1_t test_vid_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint16m2_t test_vid_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint16m4_t 
test_vid_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint16m8_t test_vid_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint32m1_t test_vid_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint32m2_t test_vid_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint32m4_t test_vid_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint32m8_t test_vid_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint64m1_t test_vid_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint64m2_t test_vid_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint64m4_t test_vid_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint64m8_t test_vid_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_tum(mask, maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, size_t vl) { + return __riscv_vid_tum(vm, vd, vl); } -vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t mask, vuint8m1_t 
maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint8m2_t test_vid_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint32m1_t test_vid_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint32m2_t test_vid_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, size_t 
vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint64m4_t test_vid_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_tumu(mask, maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, size_t vl) { + return __riscv_vid_tumu(vm, vd, vl); } -vuint8mf8_t test_vid_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint8mf8_t test_vid_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint8mf4_t test_vid_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint8mf4_t test_vid_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint8mf2_t test_vid_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint8mf2_t test_vid_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint8m1_t test_vid_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint8m1_t test_vid_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint8m2_t test_vid_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint8m2_t test_vid_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint8m4_t test_vid_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint8m4_t test_vid_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint8m8_t test_vid_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint8m8_t test_vid_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint16mf4_t test_vid_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint16mf4_t test_vid_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint16mf2_t test_vid_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint16mf2_t test_vid_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint16m1_t test_vid_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint16m1_t test_vid_v_u16m1_mu(vbool16_t 
vm, vuint16m1_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint16m2_t test_vid_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint16m2_t test_vid_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint16m4_t test_vid_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint16m4_t test_vid_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint16m8_t test_vid_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint16m8_t test_vid_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint32mf2_t test_vid_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint32mf2_t test_vid_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint32m1_t test_vid_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint32m1_t test_vid_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint32m2_t test_vid_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint32m2_t test_vid_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint32m4_t test_vid_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint32m4_t test_vid_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint32m8_t test_vid_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint32m8_t test_vid_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint64m1_t test_vid_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint64m1_t test_vid_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint64m2_t test_vid_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint64m2_t test_vid_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint64m4_t test_vid_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint64m4_t test_vid_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } -vuint64m8_t test_vid_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { - return __riscv_vid_mu(mask, maskedoff, vl); +vuint64m8_t test_vid_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, size_t vl) { + return __riscv_vid_mu(vm, vd, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vid\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/viota.c b/auto-generated/policy_funcs/gnu-overloaded-tests/viota.c index 687fbabde..e8aa70176 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/viota.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/viota.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; 
-vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_tu(vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_tu(vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_tu(vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_tu(vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_tu(vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_tu(vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_tu(vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_tu(vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_tu(vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_tu(vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_tu(vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_tu(vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_tu(vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint32m1_t 
test_viota_m_u32m1_tu(vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_tu(vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_tu(vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_tu(vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_tu(vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tu(vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_tu(vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_tu(vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_tu(vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tu(maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tu(vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, 
vbool2_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tum(vbool4_t mask, vuint32m8_t 
maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t mask, 
vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, 
vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint8m1_t test_viota_m_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint8m2_t test_viota_m_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint8m4_t test_viota_m_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint8m8_t test_viota_m_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint16m1_t 
test_viota_m_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_viota_m_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_viota_m_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_viota_m_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_viota_m_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_viota_m_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_viota_m_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_viota_m_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_viota_m_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_viota_m_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_viota_m_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_viota_m_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_mu(vbool8_t mask, 
vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_viota_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_viota_m_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_viota_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+viota\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle16.c index b33f1a25d..bddbafbff 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle16.c @@ -6,292 +6,292 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vint16m1_t test_vle16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vint16m2_t test_vle16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t maskedoff, 
const int16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vint16m4_t test_vle16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tu(maskedoff, base, vl); +vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vfloat16m4_t 
test_vle16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vint16m1_t test_vle16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vint16m2_t test_vle16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vint16m4_t test_vle16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vint16m8_t test_vle16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } 
-vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tum(mask, maskedoff, base, vl); +vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t mask, 
vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_tumu(mask, maskedoff, base, vl); +vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t vl) { - return 
__riscv_vle16_mu(mask, maskedoff, base, vl); +vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vint16m1_t test_vle16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vint16m2_t test_vle16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vint16m4_t test_vle16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vint16m8_t test_vle16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return __riscv_vle16_mu(mask, maskedoff, base, vl); +vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { + return 
__riscv_vle16_mu(vm, vd, rs1, vl);
 }

-vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_mu(mask, maskedoff, base, vl);
+vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
 }

-vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_mu(mask, maskedoff, base, vl);
+vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
 }

-vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_mu(mask, maskedoff, base, vl);
+vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
 }

-vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
-  return __riscv_vle16_mu(mask, maskedoff, base, vl);
+vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16\.[ivxfswum.]+\s+} 78 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle16ff.c
index 302cab593..005a01f2f 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle16ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle16ff.c
@@ -6,292 +6,292 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
+vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
+vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
+vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
+vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl);
+vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t
*base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t 
*base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tu(maskedoff, base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, 
vl); } -vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tum(mask, maskedoff, base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t vm, vuint16m8_t 
vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t mask, 
vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const 
float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl); +vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle16ff_mu(mask, 
maskedoff, base, new_vl, vl);
+vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl);
+vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle16ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle16ff\.[ivxfswum.]+\s+} 72 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle32.c
index eca99096c..9660584cc 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle32.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_tu(maskedoff, base, vl);
+vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_tu(vd,
rs1, vl); } -vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vint32m1_t test_vle32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vint32m2_t test_vle32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vint32m4_t test_vle32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t 
maskedoff, const uint32_t *base, size_t vl) { - return __riscv_vle32_tu(maskedoff, base, vl); +vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vle32_tu(vd, rs1, vl); } -vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vint32m1_t test_vle32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vint32m1_t test_vle32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vint32m2_t test_vle32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vint32m2_t test_vle32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vint32m4_t test_vle32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vint32m4_t test_vle32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vint32m8_t test_vle32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vint32m8_t test_vle32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vle32_tum(vm, vd, rs1, vl); } -vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return __riscv_vle32_tum(mask, maskedoff, base, vl); +vuint32mf2_t 
test_vle32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tum(vm, vd, rs1, vl);
 }

-vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tum(mask, maskedoff, base, vl);
+vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tum(vm, vd, rs1, vl);
 }

-vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tum(mask, maskedoff, base, vl);
+vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tum(vm, vd, rs1, vl);
 }

-vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tum(mask, maskedoff, base, vl);
+vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tum(vm, vd, rs1, vl);
 }

-vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tum(mask, maskedoff, base, vl);
+vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tum(vm, vd, rs1, vl);
 }

-vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_tumu(mask, maskedoff, base, vl);
+vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_tumu(vm, vd, rs1, vl);
 }

-vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vint32m1_t test_vle32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vint32m1_t test_vle32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vint32m2_t test_vle32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vint32m2_t test_vle32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vint32m4_t test_vle32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vint32m4_t test_vle32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vint32m8_t test_vle32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vint32m8_t test_vle32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

-vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
-  return __riscv_vle32_mu(mask, maskedoff, base, vl);
+vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vle32_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32\.[ivxfswum.]+\s+} 63 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle32ff.c
index 5748695dd..c36831fdc 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle32ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle32ff.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tu(maskedoff, base, new_vl, vl);
+vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle32ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle32ff_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle32ff\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle64.c
index a71eca567..b2702d6cb 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle64.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vint64m1_t test_vle64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vint64m2_t test_vle64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vint64m4_t test_vle64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tu(maskedoff, base, vl);
+vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tu(vd, rs1, vl);
 }

-vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vint64m1_t test_vle64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vint64m1_t test_vle64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vint64m2_t test_vle64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vint64m2_t test_vle64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vint64m4_t test_vle64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vint64m4_t test_vle64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vint64m8_t test_vle64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vint64m8_t test_vle64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tum(mask, maskedoff, base, vl);
+vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tum(vm, vd, rs1, vl);
 }

-vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_tumu(mask, maskedoff, base, vl);
+vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_tumu(vm, vd, rs1, vl);
 }

-vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vint64m1_t test_vle64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vint64m1_t test_vle64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vint64m2_t test_vle64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vint64m2_t test_vle64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vint64m4_t test_vle64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vint64m4_t test_vle64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vint64m8_t test_vle64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vint64m8_t test_vle64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

-vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
-  return __riscv_vle64_mu(mask, maskedoff, base, vl);
+vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vle64_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle64ff.c
index 8b15ebc2c..916c5f4ce 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle64ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle64ff.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tu(maskedoff, base, new_vl, vl);
+vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tum(mask, maskedoff, base, new_vl, vl);
+vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_tumu(mask, maskedoff, base, new_vl, vl);
+vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vle64ff_mu(mask, maskedoff, base, new_vl, vl);
+vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vle64ff_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle64ff\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle8.c
index e445e897f..30be26866 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle8.c
@@ -6,228 +6,228 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vint8mf8_t test_vle8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vint8mf4_t test_vle8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vint8mf2_t test_vle8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vint8mf2_t test_vle8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vint8m1_t test_vle8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vint8m2_t test_vle8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vint8m4_t test_vle8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vuint8m1_t test_vle8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vuint8m2_t test_vle8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vuint8m4_t test_vle8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tu(maskedoff, base, vl);
+vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tu(vd, rs1, vl);
 }

-vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vint8m1_t test_vle8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vint8m1_t test_vle8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vint8m2_t test_vle8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vint8m2_t test_vle8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vint8m4_t test_vle8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vint8m4_t test_vle8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vint8m8_t test_vle8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vint8m8_t test_vle8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) {
-  return __riscv_vle8_tum(mask, maskedoff, base, vl);
+vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vle8_tum(vm, vd, rs1, vl);
 }

-vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tumu(mask, maskedoff, base, vl);
+vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tumu(vm, vd, rs1, vl);
 }

-vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tumu(mask, maskedoff, base, vl);
+vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vle8_tumu(vm, vd, rs1, vl);
 }

-vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
-  return __riscv_vle8_tumu(mask, maskedoff, base, vl);
+vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t vm,
vint8mf2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_tumu(mask, maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_tumu(vm, vd, rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, 
maskedoff, base, vl); +vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vint8m1_t test_vle8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vint8m2_t test_vle8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vint8m4_t test_vle8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vint8m8_t test_vle8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); 
+vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return __riscv_vle8_mu(mask, maskedoff, base, vl); +vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vle8_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8\.[ivxfswum.]+\s+} 62 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vle8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vle8ff.c index 44c957787..1eb61be48 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vle8ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vle8ff.c @@ -6,228 +6,228 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t 
vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tu(maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tu(vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } 
-vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tum(mask, maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, 
vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_tumu(mask, maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } 
-vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vle8ff_mu(mask, maskedoff, base, new_vl, vl); +vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vle8ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vle8ff\.[ivxfswum.]+\s+} 56 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei16.c index b293c4e6b..595c9be35 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei16.c @@ -6,916 +6,916 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); 
+vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat64m2_t 
test_vloxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint16mf2_t 
rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint16m1_t 
test_vloxei16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxei16_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t 
bindex, size_t vl) {
-  return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tu(maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tum(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei16\.[ivxfswum.]+\s+} 228 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei32.c
index 4b683b152..81fb823d0 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei32.c
@@ -6,836 +6,836 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
 }
 
-vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxei32_tu(maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return
__riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t vd, const 
uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - 
return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } 
-vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t 
test_vloxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t 
test_vloxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t 
test_vloxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t 
test_vloxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t 
test_vloxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } 
-vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
- return __riscv_vloxei32_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei32\.[ivxfswum.]+\s+} 208 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei64.c
index e210d2004..7bef8b541 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei64.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tu(maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tum(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2,
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
- return __riscv_vloxei64_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei64\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei8.c
index 2e76df20f..5754b7d52 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxei8.c
@@ -6,948 +6,948 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tu(maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tum(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t
rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, 
bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t 
maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t vm, 
vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t 
maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t 
*rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, 
const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t 
test_vloxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, 
maskedoff, base, bindex, vl); +vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t mask, 
vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxei8\.[ivxfswum.]+\s+} 236 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei16.c index ae8803029..7810ee027 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei16.c @@ -6,772 +6,772 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const 
float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t 
test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - 
return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t 
test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t 
test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { 
+ return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, 
vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } 
-vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t 
vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { 
- return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, 
vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, 
vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { - 
return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, 
vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) { - 
return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t 
test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, 
vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei16\.[ivxfswum.]+\s+} 192 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei32.c
index 3edb7e6c6..32308bdd0 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei32.c
@@ -6,740 +6,740 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1,
vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, 
rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); 
} -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, 
maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t 
maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + 
return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, 
maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, 
vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t 
vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, 
maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } 
-vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, 
+vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei32\.[ivxfswum.]+\s+} 184 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei64.c
index 4156a83e0..13edd86d3 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei64.c
@@ -6,660 +6,660 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, 
const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, 
vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t 
test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, 
vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei64\.[ivxfswum.]+\s+} 164 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei8.c index 56bda355d..21bd58d8d 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg2ei8.c @@ -6,772 +6,772 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } 
-vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t 
test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, 
bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) 
{ - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t 
test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t mask, 
vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t 
vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t 
test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t 
test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t vm, 
vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t 
mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t 
rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, 
const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, 
size_t vl) {
+  return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return
__riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } 
-vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, 
rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { 
+ return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t vm, 
vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg2ei8_mu(mask, 
maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg2ei8\.[ivxfswum.]+\s+} 192 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei16.c
index e642573a1..61de39f68 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei16.c
@@ -6,596 +6,596 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base,
vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, 
rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t 
test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t 
test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t 
maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, 
size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, 
vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t 
bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t 
rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - 
return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t mask, 
vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei16_mu(mask, 
maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei16\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei32.c
index e11783416..8382d3e54 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei32.c
@@ -6,596 +6,596 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_tu(vd, rs1, rs2,
vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t 
test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t 
test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, 
const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, 
maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + 
return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, 
rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, 
base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, 
rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, 
vl); +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, 
size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t 
test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t 
test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei32\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei64.c
index 82f25ae79..bb4fdb7dc 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei64.c
@@ -6,564 +6,564 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return
__riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t 
test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, const 
uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t 
test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, 
base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t 
maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t 
vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, 
const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - 
return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, 
rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t 
test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, 
size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei64_mu(vm, vd, rs1, 
rs2, vl);
 }
-vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei64\.[ivxfswum.]+\s+} 140 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei8.c
index 31db89a68..d95b8945d 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg3ei8.c
@@ -6,596 +6,596 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t
test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t 
test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t 
test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg3ei8\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei16.c
index 00df70dd9..5083cbb43 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei16.c
@@ -6,596 +6,596 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t mask,
vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t 
vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, 
vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, 
vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, 
const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, 
size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t 
test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, 
vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei16\.[ivxfswum.]+\s+} 148 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei32.c index 5cabbada6..108b3eff3 100644 --- 
a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei32.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t 
*rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, 
bindex, vl); +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, 
const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t 
test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } 
-vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, 
const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei32\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei64.c
index 071cb7dc0..7c5a0d8fb 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei64.c
@@ -6,564 +6,564 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return
__riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) 
{ + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t mask, 
vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei64\.[ivxfswum.]+\s+} 140 } } */ diff --git 
a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei8.c index 83385b43c..95362850b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg4ei8.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, 
vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t 
test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t 
test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t 
test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return 
__riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t 
test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg4ei8\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei16.c
index 2dc68f7c4..94555bd07 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei16.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base,
vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei32.c
index 0ad6fdff4..bbca13947 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei32.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base,
bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t 
test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t 
vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, 
size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } 
-vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, 
vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t mask, 
vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei64.c
index 5446718c0..659eafe6f 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei64.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return
__riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei8.c
index 2afd47236..b48514360 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg5ei8.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t
*rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - 
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg5ei8\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei16.c
index 5bc9c8721..c4c264bef 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei16.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei32.c
index 78e3cb517..a9c4cd9ad 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t
test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t 
test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t 
maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t 
rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const 
float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t 
*rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, 
size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const 
float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t 
test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
 }

/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei64.c
index 39e55de1c..55f88e495 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei64.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex,
size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); 
+vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t 
test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei8.c
index dee15304b..cb922ec3f 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg6ei8.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg6ei8\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei16.c
index e1c074538..9a9ebf3ee 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei16.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei32.c
index 78f3bf8a0..a2cbf2a21 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei32_tum(vm,
vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t 
vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t 
rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const 
uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei64.c index 6830d68cc..64b4cb367 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei64.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t 
test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const 
int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return 
__riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t 
*rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } 
-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, 
rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, 
rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t vm, 
vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei64\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei8.c index 75144a01b..8e5b1507c 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg7ei8.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + 
return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t 
test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t 
vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t 
*base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t 
test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const 
uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, 
const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, 
base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t 
test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t 
vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg7ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei16.c index 86747c34d..2b373eff0 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t 
*base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
__riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t 
test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t vm, 
vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t 
*base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t 
test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, 
vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t 
test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } 
-vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, 
const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei16\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei32.c index 85c0757ed..072c88da5 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); 
+vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t 
maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei64.c
index 426032a02..c1648d6e3 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei64.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei8.c
index 6ffb6d67b..c22c138c0 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vloxseg8ei8.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei8_tu(vd, rs1,
rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t 
vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, 
base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, 
vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, 
rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t 
test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t 
test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, 
vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t 
test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vloxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vloxseg8ei8\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse16.c
index 8ddc59393..681438da5 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse16.c
@@ -6,292 +6,292 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
+vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlse16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
+vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlse16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
+vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlse16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
+vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlse16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
+vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlse16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) {
- return __riscv_vlse16_tu(maskedoff, base, bstride, vl);
+vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlse16_tu(vd, rs1, rs2, vl);
 }
-vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return
__riscv_vlse16_tu(maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tu(maskedoff, base, bstride, 
vl); +vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tum(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } 
-vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vint16m4_t 
test_vlse16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_tumu(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, 
const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t 
test_vlse16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse16_mu(mask, maskedoff, base, bstride, vl); +vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse16\.[ivxfswum.]+\s+} 72 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse32.c index c3dc2d2ef..bed328ce7 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse32.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vfloat32m2_t 
test_vlse32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t vd, 
const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tu(maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vint32m4_t 
test_vlse32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tum(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t 
*base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, 
rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_tumu(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const 
int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse32_mu(mask, maskedoff, base, bstride, vl); +vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse32\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse64.c index ff8f082f7..9fe62a5e7 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse64.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tu(maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, 
size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t 
test_vlse64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tum(mask, maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, 
const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_tumu(mask, maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, 
maskedoff, base, bstride, vl); +vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse64_mu(mask, maskedoff, base, bstride, vl); +vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse64\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse8.c index 654a3d8ae..662b59355 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlse8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlse8.c @@ -6,228 +6,228 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t maskedoff, 
const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tu(maskedoff, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t vd, const uint8_t 
*rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tum(mask, maskedoff, base, bstride, vl); +vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl); } 
-vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_tumu(mask, maskedoff, base, bstride, vl);
+vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m8_t test_vlse8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlse8_mu(mask, maskedoff, base, bstride, vl);
+vuint8m8_t test_vlse8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse8_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlse8\.[ivxfswum.]+\s+} 56 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16.c
index e1a93d321..00f6120de 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vint16m1x2_t test_vlseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vint16m1x2_t test_vlseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vint16m2x2_t test_vlseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vint16m2x2_t test_vlseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vint16m4x2_t test_vlseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vint16m4x2_t test_vlseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tu(maskedoff_tuple, base, vl);
+vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tu(vd, rs1, vl);
 }

-vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vint16m1x2_t test_vlseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vint16m1x2_t test_vlseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vint16m2x2_t test_vlseg2e16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vint16m2x2_t test_vlseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vint16m4x2_t test_vlseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vint16m4x2_t test_vlseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tum(vm, vd, rs1, vl);
 }

-vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vint16m1x2_t test_vlseg2e16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16m1x2_t test_vlseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vint16m2x2_t test_vlseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16m2x2_t test_vlseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vint16m4x2_t test_vlseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16m4x2_t test_vlseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl);
 }

-vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vint16m1x2_t test_vlseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vint16m1x2_t test_vlseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vint16m2x2_t test_vlseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vint16m2x2_t test_vlseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vint16m4x2_t test_vlseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vint16m4x2_t test_vlseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vuint16m1x2_t test_vlseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16m1x2_t test_vlseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vuint16m2x2_t test_vlseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16m2x2_t test_vlseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

-vuint16m4x2_t test_vlseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg2e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16m4x2_t test_vlseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg2e16_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16ff.c
index 97f50ef26..42f7f01e5 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e16ff.c
@@ -6,244 +6,244 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg2e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e16ff\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32.c
index dea59f25d..2f002819b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vint32m1x2_t test_vlseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vint32m1x2_t test_vlseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vint32m2x2_t test_vlseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vint32m2x2_t test_vlseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vint32m4x2_t test_vlseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vint32m4x2_t test_vlseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tu(maskedoff_tuple, base, vl);
+vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tu(vd, rs1, vl);
 }

-vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tum(vm, vd, rs1, vl);
 }

-vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tum(vm, vd, rs1, vl);
 }

-vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tum(vm, vd, rs1, vl);
 }

-vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tum(vm, vd, rs1, vl);
 }

-vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
+vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tum(vm, vd, rs1, vl);
 }

-vint32m1x2_t test_vlseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
+vint32m1x2_t test_vlseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tum(vm, vd, rs1, vl);
 }

-vint32m2x2_t test_vlseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
+vint32m2x2_t test_vlseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg2e32_tum(vm, vd, rs1, vl);
 }

-vint32m4x2_t test_vlseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl);
test_vlseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tum(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tum(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tum(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tum(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tum(mask, maskedoff_tuple, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tum(vm, vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); 
+vint32m1x2_t test_vlseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_tumu(vm, vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); 
+vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vint32m1x2_t test_vlseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vint32m2x2_t test_vlseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vint32m4x2_t test_vlseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg2e32_mu(mask, maskedoff_tuple, base, vl); +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg2e32_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32ff.c index 7497243e9..5d9dff96d 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e32ff.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x2_t 
test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t 
*rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t 
test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t 
maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) 
{ + return __riscv_vlseg2e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x2_t 
test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e32ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e32ff\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64.c index 181889aaa..c96ff2f5a 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tu(vint64m4x2_t 
maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tu(maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tu(vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, 
const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tum(vm, vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, 
const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_tumu(vm, vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vint64m1x2_t test_vlseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vint64m2x2_t test_vlseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vint64m4x2_t test_vlseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg2e64_mu(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg2e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { + return 
__riscv_vlseg2e64_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64ff.c index 2544ba30f..3b7229f07 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e64ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m4x2_t 
test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + 
return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, 
base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const 
uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e64ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e64ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8.c index f0bedfc77..ea9d132ca 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, 
const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tu(maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tu(vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const 
uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tum(mask, maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tum(vm, vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t 
maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_tumu(vm, vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vint8m1x2_t 
test_vlseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vint8m2x2_t test_vlseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vint8m4x2_t test_vlseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg2e8_mu(mask, maskedoff_tuple, base, vl); +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg2e8_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8ff.c index 24b3fc248..486ff01b6 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg2e8ff.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, 
base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t 
test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const 
uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t vm, 
vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x2_t 
test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x2_t 
test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg2e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg2e8ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg2e8ff\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16.c index 3de098552..5878cf2f2 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, size_t 
vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tu(maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, 
size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tum(mask, maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const 
int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t 
maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vint16m1x3_t test_vlseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vint16m2x3_t test_vlseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg3e16_mu(mask, maskedoff_tuple, base, vl); +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16ff.c index 31da8190a..63b643db5 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e16ff.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const 
uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e16ff\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32.c
index f79760814..174ff0069 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vint32m1x3_t test_vlseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vint32m1x3_t test_vlseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vint32m2x3_t test_vlseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vint32m2x3_t test_vlseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tu(maskedoff_tuple, base, vl);
+vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tu(vd, rs1, vl);
 }
 
-vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vint32m1x3_t test_vlseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vint32m1x3_t test_vlseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vint32m2x3_t test_vlseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vint32m2x3_t test_vlseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tum(mask, maskedoff_tuple, base, vl);
+vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tum(vm, vd, rs1, vl);
 }
 
-vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vint32m1x3_t test_vlseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vint32m1x3_t test_vlseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vint32m2x3_t test_vlseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vint32m2x3_t test_vlseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_tumu(mask, maskedoff_tuple, base, vl);
+vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vint32m1x3_t test_vlseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vint32m1x3_t test_vlseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vint32m2x3_t test_vlseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vint32m2x3_t test_vlseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32m1x3_t test_vlseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg3e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32m2x3_t test_vlseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg3e32_mu(vm, vd, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32ff.c
index fbd9d74bb..4ddee06f6 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e32ff.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e32ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e32ff\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64.c
index 9f0140acf..6940a2bd3 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tu(maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tu(vd, rs1, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tu(maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tu(vd, rs1, vl);
 }
 
-vint64m1x3_t test_vlseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tu(maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tu(vd, rs1, vl);
 }
 
-vint64m2x3_t test_vlseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tu(maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tu(vd, rs1, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tu(maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tu(vd, rs1, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tu(maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tu(vd, rs1, vl);
 }
 
-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tum(mask, maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tum(vm, vd, rs1, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tum(mask, maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tum(vm, vd, rs1, vl);
 }
 
-vint64m1x3_t test_vlseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tum(mask, maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tum(vm, vd, rs1, vl);
 }
 
-vint64m2x3_t test_vlseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tum(mask, maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tum(vm, vd, rs1, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tum(mask, maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tum(vm, vd, rs1, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tum(mask, maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tum(vm, vd, rs1, vl);
 }
 
-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tumu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tumu(vm, vd, rs1, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tumu(mask, maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tumu(vm, vd, rs1, vl);
 }
 
-vint64m1x3_t test_vlseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tumu(mask, maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tumu(vm, vd, rs1, vl);
 }
 
-vint64m2x3_t test_vlseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tumu(mask, maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tumu(vm, vd, rs1, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tumu(mask, maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_tumu(vm, vd, rs1, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_tumu(mask, maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
  return __riscv_vlseg3e64_tumu(vm, vd, rs1, vl);
 }
 
-vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_mu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_mu(vm, vd, rs1, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_mu(mask, maskedoff_tuple, base, vl);
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_mu(vm, vd, rs1, vl);
 }
 
-vint64m1x3_t test_vlseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_mu(mask, maskedoff_tuple, base, vl);
+vint64m1x3_t test_vlseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_mu(vm, vd, rs1, vl);
 }
 
-vint64m2x3_t test_vlseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_mu(mask, maskedoff_tuple, base, vl);
+vint64m2x3_t test_vlseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_mu(vm, vd, rs1, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_mu(mask, maskedoff_tuple, base, vl);
+vuint64m1x3_t test_vlseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) {
  return __riscv_vlseg3e64_mu(vm, vd, rs1, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg3e64_mu(mask, maskedoff_tuple, base, vl);
+vuint64m2x3_t test_vlseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg3e64_mu(vm, vd, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64ff.c
index b73e67e7e..68cfba6c0 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e64ff.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tu(vd, rs1, new_vl, vl);
 }
 
-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tu(vd, rs1, new_vl, vl);
 }
 
-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tu(vd, rs1, new_vl, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg3e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e64ff_mu(vm, vd, rs1, new_vl, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e64ff\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8.c
index 2fc740fb1..1ee46207b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8.c
@@ -6,164 +6,164 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vint8m1x3_t test_vlseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vint8m2x3_t test_vlseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tu(maskedoff_tuple, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tu(vd, rs1, vl);
 }
 
-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vint8m1x3_t test_vlseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vint8m2x3_t test_vlseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tum(vm, vd, rs1, vl);
 }
 
-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vint8m1x3_t test_vlseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vint8m2x3_t test_vlseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8m2x3_t test_vlseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) {
  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) {
  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_tumu(vm, vd, rs1, vl);
 }
 
-vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_mu(vm, vd, rs1, vl);
 }
 
-vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg3e8_mu(vm, vd, rs1, vl);
 }
 
-vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) {
  return __riscv_vlseg3e8_mu(vm, vd, rs1, vl);
 }
 
-vint8m1x3_t test_vlseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl);
+vint8m1x3_t test_vlseg3e8_v_i8m1x3_mu(vbool8_t vm,
vint8m1x3_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_mu(vm, vd, rs1, vl); } -vint8m2x3_t test_vlseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl); +vint8m2x3_t test_vlseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_mu(vm, vd, rs1, vl); } -vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_mu(vm, vd, rs1, vl); } -vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_mu(vm, vd, rs1, vl); } -vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_mu(vm, vd, rs1, vl); } -vuint8m1x3_t test_vlseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x3_t test_vlseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_mu(vm, vd, rs1, vl); } -vuint8m2x3_t test_vlseg3e8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg3e8_mu(mask, maskedoff_tuple, base, vl); +vuint8m2x3_t test_vlseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg3e8_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8ff.c index f09f364f1..bb36cf5ce 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg3e8ff.c @@ -6,164 +6,164 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, 
base, new_vl, vl); +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, 
size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const 
int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t 
mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t 
maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg3e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg3e8ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg3e8ff\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16.c index cc26c7877..8a3171710 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vuint16mf4x4_t 
test_vlseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tu(maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } 
-vint16m2x4_t test_vlseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tum(mask, maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return 
__riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t vl) { 
+ return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vint16m1x4_t test_vlseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vint16m2x4_t test_vlseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg4e16_mu(mask, maskedoff_tuple, base, vl); +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16ff.c index 2f84a2dbd..64bc39652 100644 --- 
a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e16ff.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf2x4_t 
test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, 
new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t 
test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t 
maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e16ff\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32.c index 0a0089b1d..13945de84 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_tu(vint32m1x4_t 
maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vint32m1x4_t test_vlseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vint32m2x4_t test_vlseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tu(maskedoff_tuple, base, vl); +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tu(vd, rs1, vl); } -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vint32m1x4_t test_vlseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vint32m2x4_t test_vlseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) { + 
return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tum(mask, maskedoff_tuple, base, vl); +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tum(vm, vd, rs1, vl); } -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x4_t test_vlseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vint32m2x4_t test_vlseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t 
*rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_tumu(vm, vd, rs1, vl); } -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vint32m1x4_t test_vlseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vint32m2x4_t test_vlseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { + return 
__riscv_vlseg4e32_mu(vm, vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg4e32_mu(mask, maskedoff_tuple, base, vl); +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg4e32_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32ff.c index 78e844c99..3b2da8eda 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e32ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vuint32m1x4_t 
test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, 
maskedoff_tuple, base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t mask, 
vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e32ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e32ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64.c index 51b28aff9..8edb900d3 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_tu(maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tu(vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_tu(maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tu(vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_tu(maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tu(vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_tu(maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tu(vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_tu(maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tu(vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_tu(maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tu(vd, 
rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tum(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tum(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_tum(mask, maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tum(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_tum(mask, maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tum(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tum(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tum(vm, vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tumu(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tumu(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tumu(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tumu(vm, vd, rs1, vl); } 
-vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tumu(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_tumu(vm, vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_mu(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg4e64_mu(mask, maskedoff_tuple, base, vl); +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_mu(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_mu(mask, maskedoff_tuple, base, vl); +vint64m1x4_t test_vlseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_mu(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg4e64_mu(mask, maskedoff_tuple, base, vl); +vint64m2x4_t test_vlseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_mu(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_mu(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg4e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg4e64_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64ff.c index e04ba702b..935db6e27 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e64ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tu(maskedoff_tuple, base, 
new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tu(vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tu(vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t 
*new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_mu(mask, 
maskedoff_tuple, base, new_vl, vl); +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e64ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e64ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8.c index eb9cf9378..47f7f9fca 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8.c @@ -6,164 +6,164 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); 
+vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tu(maskedoff_tuple, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tu(vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, 
rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tum(mask, maskedoff_tuple, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tum(vm, vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return 
__riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg4e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_tumu(vm, vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_mu(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_mu(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_mu(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl); +vint8m1x4_t test_vlseg4e8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg4e8_mu(vm, vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl); +vint8m2x4_t test_vlseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t 
vl) {
+  return __riscv_vlseg4e8_mu(vm, vd, rs1, vl);
 }
-vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8_mu(vm, vd, rs1, vl);
 }
-vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8_mu(vm, vd, rs1, vl);
 }
-vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8_mu(vm, vd, rs1, vl);
 }
-vuint8m1x4_t test_vlseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8m1x4_t test_vlseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8_mu(vm, vd, rs1, vl);
 }
-vuint8m2x4_t test_vlseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg4e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8m2x4_t test_vlseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg4e8_mu(vm, vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8\.[ivxfswum.]+\s+} 40 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8ff.c
index 7b3e550d2..80319ccb3 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg4e8ff.c
@@ -6,164 +6,164 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl);
 }
-vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl);
 }
-vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl);
 }
-vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t 
maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t 
test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, 
vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg4e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg4e8ff_mu(vm, vd, rs1, new_vl, vl); } /* { 
dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg4e8ff\.[ivxfswum.]+\s+} 40 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16.c
index d73d12766..7e4f2f852 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vint16m1x5_t test_vlseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vint16m1x5_t test_vlseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tu(maskedoff_tuple, base, vl);
+vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_tu(vd, rs1, vl);
 }
-vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base,
vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, 
maskedoff_tuple, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg5e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_mu(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg5e16_mu(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t vl) { - 
return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
 }
-vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl);
+vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
 }
-vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl);
+vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
 }
-vint16m1x5_t test_vlseg5e16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl);
+vint16m1x5_t test_vlseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
 }
-vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
 }
-vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
 }
-vuint16m1x5_t test_vlseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg5e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16m1x5_t test_vlseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16ff.c
index f35223978..f24769cfb 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e16ff.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl);
 }
-vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1,
size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t mask, 
vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const 
float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, 
size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e16ff\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32.c
index d62efad0d..537ec030b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tu(maskedoff_tuple, base, vl);
+vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tu(vd, rs1, vl);
 }
-vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tu(maskedoff_tuple, base, vl);
+vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tu(vd, rs1, vl);
 }
-vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tu(maskedoff_tuple, base, vl);
+vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tu(vd, rs1, vl);
 }
-vint32m1x5_t test_vlseg5e32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tu(maskedoff_tuple, base, vl);
+vint32m1x5_t test_vlseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tu(vd, rs1, vl);
 }
-vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tu(maskedoff_tuple, base, vl);
+vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tu(vd, rs1, vl);
 }
-vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tu(maskedoff_tuple, base, vl);
+vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tu(vd, rs1, vl);
 }
-vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tum(vm, vd, rs1, vl);
 }
-vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tum(vm, vd, rs1, vl);
 }
-vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tum(mask, maskedoff_tuple, base, vl);
+vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tum(vm, vd, rs1, vl);
 }
-vint32m1x5_t test_vlseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_tum(mask, maskedoff_tuple, base, vl);
+vint32m1x5_t test_vlseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_tum(vm, vd, rs1, vl);
 }
-vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const
uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tum(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tum(vm, vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tumu(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tumu(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tumu(vm, vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg5e32_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x5_t test_vlseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tumu(vm, vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tumu(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg5e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_tumu(vm, vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_mu(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg5e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg5e32_mu(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t 
maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_mu(mask, maskedoff_tuple, base, vl);
+vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_mu(vm, vd, rs1, vl);
 }
-vint32m1x5_t test_vlseg5e32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_mu(mask, maskedoff_tuple, base, vl);
+vint32m1x5_t test_vlseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_mu(vm, vd, rs1, vl);
 }
-vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_mu(vm, vd, rs1, vl);
 }
-vuint32m1x5_t test_vlseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg5e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32m1x5_t test_vlseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg5e32_mu(vm, vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32ff.c
index 323b242af..46492691b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e32ff.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff_tu(vd, rs1, new_vl, vl);
 }
-vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff_tu(vd, rs1, new_vl, vl);
 }
-vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff_tu(vd, rs1, new_vl, vl);
 }
-vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e32ff_tu(vd, rs1, new_vl, vl);
 }
-vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg5e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tu(vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t 
*new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(vm, vd, rs1, 
new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e32ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e32ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64.c index c2498ec13..5bd3dfab9 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_tu(maskedoff_tuple, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tu(vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_tu(maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tu(vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_tu(maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tu(vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tum(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_tum(mask, maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tum(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tum(vm, vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_tumu(mask, maskedoff_tuple, 
base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tumu(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tumu(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_tumu(vm, vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg5e64_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_mu(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg5e64_mu(mask, maskedoff_tuple, base, vl); +vint64m1x5_t test_vlseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_mu(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg5e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg5e64_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64ff.c index 0b9289186..85be90aa6 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tu(vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg5e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x5_t 
test_vlseg5e64ff_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e64ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8.c index 2ba19fd11..8634b9298 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tu(maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tu(vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t 
*base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tum(vm, vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tumu(vbool8_t 
vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_tumu(vm, vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vint8m1x5_t test_vlseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } -vuint8mf2x5_t 
test_vlseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg5e8_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg5e8_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8ff.c index 1b432157f..ae2e5e299 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg5e8ff.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t 
test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t vm, 
vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); 
+vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg5e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg5e8ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg5e8ff\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16.c index 8442900d7..e99555186 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t 
maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tu(maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t 
maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tumu(vbool64_t 
mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vint16mf2x6_t 
test_vlseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vint16m1x6_t test_vlseg6e16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg6e16_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16ff.c index 8f62f2148..c5e3042d0 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e16ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - 
return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
 }

-vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
 }

-vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
 }

-vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e16ff\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32.c
index aa5e290f0..adefdc85b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tu(maskedoff_tuple, base, vl);
+vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tu(vd, rs1, vl);
 }

-vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tu(maskedoff_tuple, base, vl);
+vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tu(vd, rs1, vl);
 }

-vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tu(maskedoff_tuple, base, vl);
+vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tu(vd, rs1, vl);
 }

-vint32m1x6_t test_vlseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tu(maskedoff_tuple, base, vl);
+vint32m1x6_t test_vlseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tu(vd, rs1, vl);
 }

-vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tu(maskedoff_tuple, base, vl);
+vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tu(vd, rs1, vl);
 }

-vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tu(maskedoff_tuple, base, vl);
+vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tu(vd, rs1, vl);
 }

-vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tum(vm, vd, rs1, vl);
 }

-vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tum(vm, vd, rs1, vl);
 }

-vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tum(mask, maskedoff_tuple, base, vl);
+vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tum(vm, vd, rs1, vl);
 }

-vint32m1x6_t test_vlseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tum(mask, maskedoff_tuple, base, vl);
+vint32m1x6_t test_vlseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tum(vm, vd, rs1, vl);
 }

-vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tum(mask, maskedoff_tuple, base, vl);
+vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tum(vm, vd, rs1, vl);
 }

-vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tum(mask, maskedoff_tuple, base, vl);
+vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tum(vm, vd, rs1, vl);
 }

-vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tumu(vm, vd, rs1, vl);
 }

-vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tumu(vm, vd, rs1, vl);
 }

-vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tumu(mask, maskedoff_tuple, base, vl);
+vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tumu(vm, vd, rs1, vl);
 }

-vint32m1x6_t test_vlseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tumu(mask, maskedoff_tuple, base, vl);
+vint32m1x6_t test_vlseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tumu(vm, vd, rs1, vl);
 }

-vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tumu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tumu(vm, vd, rs1, vl);
 }

-vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_tumu(mask, maskedoff_tuple, base, vl);
+vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_tumu(vm, vd, rs1, vl);
 }

-vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_mu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_mu(vm, vd, rs1, vl);
 }

-vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_mu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_mu(vm, vd, rs1, vl);
 }

-vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_mu(mask, maskedoff_tuple, base, vl);
+vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_mu(vm, vd, rs1, vl);
 }

-vint32m1x6_t test_vlseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_mu(mask, maskedoff_tuple, base, vl);
+vint32m1x6_t test_vlseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_mu(vm, vd, rs1, vl);
 }

-vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_mu(vm, vd, rs1, vl);
 }

-vuint32m1x6_t test_vlseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg6e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32m1x6_t test_vlseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg6e32_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32ff.c
index 766d7721a..a31e7017c 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e32ff.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tu(vd, rs1, new_vl, vl);
 }

-vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tu(vd, rs1, new_vl, vl);
 }

-vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tu(vd, rs1, new_vl, vl);
 }

-vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tu(vd, rs1, new_vl, vl);
 }

-vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e32ff_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e32ff\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64.c
index 0b1068aa2..02d0e5e81 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tu(maskedoff_tuple, base, vl);
+vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tu(vd, rs1, vl);
 }

-vint64m1x6_t test_vlseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tu(maskedoff_tuple, base, vl);
+vint64m1x6_t test_vlseg6e64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tu(vd, rs1, vl);
 }

-vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tu(maskedoff_tuple, base, vl);
+vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tu(vd, rs1, vl);
 }

-vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tum(mask, maskedoff_tuple, base, vl);
+vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tum(vm, vd, rs1, vl);
 }

-vint64m1x6_t test_vlseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tum(mask, maskedoff_tuple, base, vl);
+vint64m1x6_t test_vlseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tum(vm, vd, rs1, vl);
 }

-vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tum(mask, maskedoff_tuple, base, vl);
+vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tum(vm, vd, rs1, vl);
 }

-vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tumu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tumu(vm, vd, rs1, vl);
 }

-vint64m1x6_t test_vlseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tumu(mask, maskedoff_tuple, base, vl);
+vint64m1x6_t test_vlseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tumu(vm, vd, rs1, vl);
 }

-vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_tumu(mask, maskedoff_tuple, base, vl);
+vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_tumu(vm, vd, rs1, vl);
 }

-vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_mu(mask, maskedoff_tuple, base, vl);
+vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_mu(vm, vd, rs1, vl);
 }

-vint64m1x6_t test_vlseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_mu(mask, maskedoff_tuple, base, vl);
+vint64m1x6_t test_vlseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_mu(vm, vd, rs1, vl);
 }

-vuint64m1x6_t test_vlseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t vl) {
-  return __riscv_vlseg6e64_mu(mask, maskedoff_tuple, base, vl);
+vuint64m1x6_t test_vlseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) {
+  return __riscv_vlseg6e64_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64ff.c
index 0b66b2342..ac2622c3d 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e64ff.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tu(vd, rs1, new_vl, vl);
 }

-vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tu(vd, rs1, new_vl, vl);
 }

-vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tu(vd, rs1, new_vl, vl);
 }

-vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e64ff_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e64ff\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8.c
index e9929b5aa..1eb5a1294 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8.c
@@ -6,132 +6,132 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vint8m1x6_t test_vlseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vint8m1x6_t test_vlseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tu(maskedoff_tuple, base, vl);
+vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tu(vd, rs1, vl);
 }

-vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vint8m1x6_t test_vlseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vint8m1x6_t test_vlseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tum(vm, vd, rs1, vl);
 }

-vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vint8m1x6_t test_vlseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8m1x6_t test_vlseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_tumu(vm, vd, rs1, vl);
 }

-vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

-vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

-vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

-vint8m1x6_t test_vlseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vint8m1x6_t test_vlseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

-vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

-vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

-vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

-vuint8m1x6_t test_vlseg6e8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg6e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8m1x6_t test_vlseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg6e8_mu(vm, vd, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8ff.c
index 4a4da897c..2183cbe53 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg6e8ff.c
@@ -6,132 +6,132 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tu(vd, rs1, new_vl, vl);
 }

-vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tum(vm, vd, rs1, new_vl, vl);
 }

-vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }

-vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

-vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg6e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e8ff_mu(vm, vd, rs1, new_vl, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg6e8ff\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16.c
index e41282b60..9037e6ccf 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vint16m1x7_t test_vlseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vint16m1x7_t test_vlseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tu(maskedoff_tuple, base, vl);
+vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tu(vd, rs1, vl);
 }

-vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tum(vm, vd, rs1, vl);
 }

-vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tum(vm, vd, rs1, vl);
 }

-vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg7e16_tum(vm, vd, rs1, vl);
 }

-vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl);
+vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t
vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_tum(mask, maskedoff_tuple, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vint16mf2x7_t 
test_vlseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_tumu(mask, maskedoff_tuple, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vint16m1x7_t 
test_vlseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t vl) { - return __riscv_vlseg7e16_mu(mask, maskedoff_tuple, base, vl); +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16ff.c index 7a3573421..0f468c43e 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e16ff.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t 
vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, 
maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t 
test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e16ff\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32.c index eb00dd4ac..f29c9849e 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_tu(maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tu(vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_tu(maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tu(vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_tu(maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tu(vd, rs1, vl); } -vint32m1x7_t 
test_vlseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_tu(maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tu(vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_tu(maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tu(vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_tu(maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tu(vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_tum(mask, maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tum(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_tum(mask, maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tum(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_tum(mask, maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tum(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_tum(mask, maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tum(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_tum(mask, maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tum(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_tum(mask, maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tum(vm, vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tumu(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return 
__riscv_vlseg7e32_tumu(mask, maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tumu(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_tumu(mask, maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tumu(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_tumu(mask, maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tumu(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tumu(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_tumu(mask, maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_tumu(vm, vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_mu(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t vl) { - return __riscv_vlseg7e32_mu(mask, maskedoff_tuple, base, vl); +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_mu(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_mu(mask, maskedoff_tuple, base, vl); +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_mu(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t vl) { - return __riscv_vlseg7e32_mu(mask, maskedoff_tuple, base, vl); +vint32m1x7_t test_vlseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_mu(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return __riscv_vlseg7e32_mu(mask, maskedoff_tuple, base, vl); +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_mu(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t vl) { - return 
__riscv_vlseg7e32_mu(mask, maskedoff_tuple, base, vl); +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { + return __riscv_vlseg7e32_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32ff.c index b326d3be4..6e063cff4 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e32ff.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tu(vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tu(vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tu(vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tu(vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - 
return __riscv_vlseg7e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tumu(vm, vd, rs1, new_vl, vl); 
} -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e32ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e32ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64.c 
b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64.c index 33db40915..cbc4c001c 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_tu(maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tu(vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_tu(maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tu(vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_tu(maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tu(vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tum(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_tum(mask, maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tum(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tum(vm, vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tumu(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tumu(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_tumu(vm, vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg7e64_mu(mask, 
maskedoff_tuple, base, vl); +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_mu(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg7e64_mu(mask, maskedoff_tuple, base, vl); +vint64m1x7_t test_vlseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_mu(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg7e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg7e64_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64ff.c index 3a5c3c36b..f1adde2a3 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tu(vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tum(mask, maskedoff_tuple, 
base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg7e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg7e64ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8.c index 2a60b9542..558eb173b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl); +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg7e8_tu(vd, rs1, vl); } -vint8mf4x7_t 
test_vlseg7e8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl);
+vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tu(vd, rs1, vl);
 }
-vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl);
+vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tu(vd, rs1, vl);
 }
-vint8m1x7_t test_vlseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl);
+vint8m1x7_t test_vlseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tu(vd, rs1, vl);
 }
-vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl);
+vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tu(vd, rs1, vl);
 }
-vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl);
+vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tu(vd, rs1, vl);
 }
-vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl);
+vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tu(vd, rs1, vl);
 }
-vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tu(maskedoff_tuple, base, vl);
+vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tu(vd, rs1, vl);
 }
-vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vint8m1x7_t test_vlseg7e8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vint8m1x7_t test_vlseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tum(mask, maskedoff_tuple, base, vl);
+vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tum(vm, vd, rs1, vl);
 }
-vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vint8m1x7_t test_vlseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vint8m1x7_t test_vlseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_tumu(mask, maskedoff_tuple, base, vl);
+vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_tumu(vm, vd, rs1, vl);
 }
-vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
-vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
-vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
-vint8m1x7_t test_vlseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vint8m1x7_t test_vlseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
-vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
-vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
-vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
-vuint8m1x7_t test_vlseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t vl) {
-  return __riscv_vlseg7e8_mu(mask, maskedoff_tuple, base, vl);
+vuint8m1x7_t test_vlseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) {
+  return __riscv_vlseg7e8_mu(vm, vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8ff.c
index 5e4857638..4318fcbc8 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg7e8ff.c
@@ -6,132 +6,132 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tu(vd, rs1, new_vl, vl);
 }
-vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg7e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e8ff_mu(vm, vd, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg7e8ff\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16.c
index 46b00a93f..1c38fb898 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16.c
@@ -6,148 +6,148 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vint16m1x8_t test_vlseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vint16m1x8_t test_vlseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tu(maskedoff_tuple, base, vl);
+vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tu(vd, rs1, vl);
 }
-vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vint16m1x8_t test_vlseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vint16m1x8_t test_vlseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tum(mask, maskedoff_tuple, base, vl);
+vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
 }
-vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vint16m1x8_t test_vlseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vint16m1x8_t test_vlseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_tumu(mask, maskedoff_tuple, base, vl);
+vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
 }
-vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vint16m1x8_t test_vlseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vint16m1x8_t test_vlseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
-vuint16m1x8_t test_vlseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t vl) {
-  return __riscv_vlseg8e16_mu(mask, maskedoff_tuple, base, vl);
+vuint16m1x8_t test_vlseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) {
+  return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16ff.c
index 62716b222..d036ea1fa 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e16ff.c
@@ -6,148 +6,148 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
 }
-vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_tumu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
 }
-vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
-vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e16ff_mu(mask, maskedoff_tuple, base, new_vl, vl);
+vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e16ff\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32.c
index e4a673129..339b78a25 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tu(maskedoff_tuple, base, vl);
+vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tu(vd, rs1, vl);
 }
-vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tu(maskedoff_tuple, base, vl);
+vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tu(vd, rs1, vl);
 }
-vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tu(maskedoff_tuple, base, vl);
+vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tu(vd, rs1, vl);
 }
-vint32m1x8_t test_vlseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tu(maskedoff_tuple, base, vl);
+vint32m1x8_t test_vlseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tu(vd, rs1, vl);
 }
-vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tu(maskedoff_tuple, base, vl);
+vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tu(vd, rs1, vl);
 }
-vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tu(maskedoff_tuple, base, vl);
+vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tu(vd, rs1, vl);
 }
-vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tum(vm, vd, rs1, vl);
 }
-vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tum(mask, maskedoff_tuple, base, vl);
+vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tum(vm, vd, rs1, vl);
 }
-vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tum(mask, maskedoff_tuple, base, vl);
+vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tum(vm, vd, rs1, vl);
 }
-vint32m1x8_t test_vlseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tum(mask, maskedoff_tuple, base, vl);
+vint32m1x8_t test_vlseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tum(vm, vd, rs1, vl);
 }
-vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tum(mask, maskedoff_tuple, base, vl);
+vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tum(vm, vd, rs1, vl);
 }
-vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tum(mask, maskedoff_tuple, base, vl);
+vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tum(vm, vd, rs1, vl);
 }
-vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tumu(vm, vd, rs1, vl);
 }
-vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tumu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tumu(vm, vd, rs1, vl);
 }
-vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tumu(mask, maskedoff_tuple, base, vl);
+vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tumu(vm, vd, rs1, vl);
 }
-vint32m1x8_t test_vlseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tumu(mask, maskedoff_tuple, base, vl);
+vint32m1x8_t test_vlseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tumu(vm, vd, rs1, vl);
 }
-vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tumu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tumu(vm, vd, rs1, vl);
 }
-vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_tumu(mask, maskedoff_tuple, base, vl);
+vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_tumu(vm, vd, rs1, vl);
 }
-vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_mu(mask, maskedoff_tuple, base, vl);
+vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_mu(vm, vd, rs1, vl);
 }
-vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_mu(mask, maskedoff_tuple, base, vl);
+vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_mu(vm, vd, rs1, vl);
 }
-vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_mu(mask, maskedoff_tuple, base, vl);
+vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_mu(vm, vd, rs1, vl);
 }
-vint32m1x8_t test_vlseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_mu(mask, maskedoff_tuple, base, vl);
+vint32m1x8_t test_vlseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_mu(vm, vd, rs1, vl);
 }
-vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_mu(vm, vd, rs1, vl);
 }
-vuint32m1x8_t test_vlseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t vl) {
-  return __riscv_vlseg8e32_mu(mask, maskedoff_tuple, base, vl);
+vuint32m1x8_t test_vlseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) {
+  return __riscv_vlseg8e32_mu(vm, vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32ff.c
index 32b140102..56673c807 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32ff.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e32ff.c
@@ -6,100 +6,100 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tu(vd, rs1, new_vl, vl);
 }
-vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tu(vd, rs1, new_vl, vl);
 }
-vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tu(vd, rs1, new_vl, vl);
 }
-vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tu(vd, rs1, new_vl, vl);
 }
-vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tu(vd, rs1, new_vl, vl);
 }
-vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tu(maskedoff_tuple, base, new_vl, vl);
+vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tu(vd, rs1, new_vl, vl);
 }
-vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e32ff_tum(vm, vd, rs1, new_vl, vl);
 }
-vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) {
-  return __riscv_vlseg8e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl);
+vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t 
*new_vl, size_t vl) { - return __riscv_vlseg8e32ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e32ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e32ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e32ff\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64.c index cff54637f..b1a5c6807 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg8e64_tu(maskedoff_tuple, base, vl); +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, size_t 
vl) { + return __riscv_vlseg8e64_tu(vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg8e64_tu(maskedoff_tuple, base, vl); +vint64m1x8_t test_vlseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tu(vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg8e64_tu(maskedoff_tuple, base, vl); +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tu(vd, rs1, vl); } -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg8e64_tum(mask, maskedoff_tuple, base, vl); +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tum(vm, vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg8e64_tum(mask, maskedoff_tuple, base, vl); +vint64m1x8_t test_vlseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tum(vm, vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg8e64_tum(mask, maskedoff_tuple, base, vl); +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tum(vm, vd, rs1, vl); } -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg8e64_tumu(mask, maskedoff_tuple, base, vl); +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tumu(vm, vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg8e64_tumu(mask, maskedoff_tuple, base, vl); +vint64m1x8_t test_vlseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tumu(vm, vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg8e64_tumu(mask, maskedoff_tuple, base, vl); +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_tumu(vm, vd, rs1, vl); } -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t vl) { - return __riscv_vlseg8e64_mu(mask, maskedoff_tuple, base, vl); +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_mu(vm, vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t vl) { - return __riscv_vlseg8e64_mu(mask, maskedoff_tuple, base, vl); +vint64m1x8_t test_vlseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_mu(vm, vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t 
maskedoff_tuple, const uint64_t *base, size_t vl) { - return __riscv_vlseg8e64_mu(mask, maskedoff_tuple, base, vl); +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { + return __riscv_vlseg8e64_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64ff.c index 9f8b1237a..866f893d9 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e64ff.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tu(vd, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tu(vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tu(vd, rs1, new_vl, vl); } -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tumu(vm, vd, 
rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e64ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e64ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e64ff\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8.c index c2c1a3b4c..3bbf9962e 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_tu(vint8m1x8_t 
maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vint8m1x8_t test_vlseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tu(maskedoff_tuple, base, vl); +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tu(vd, rs1, vl); } -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vint8m1x8_t test_vlseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tum(vbool16_t mask, 
vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tum(mask, maskedoff_tuple, base, vl); +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tum(vm, vd, rs1, vl); } -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vint8m1x8_t test_vlseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_tumu(mask, maskedoff_tuple, base, vl); +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_tumu(vm, vd, rs1, vl); } -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, 
maskedoff_tuple, base, vl); +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, maskedoff_tuple, base, vl); +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, maskedoff_tuple, base, vl); +vint8m1x8_t test_vlseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, maskedoff_tuple, base, vl); +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t vl) { - return __riscv_vlseg8e8_mu(mask, maskedoff_tuple, base, vl); +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { + return __riscv_vlseg8e8_mu(vm, vd, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8ff.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8ff.c index 8f98185f4..78a96198c 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8ff.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlseg8e8ff.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t 
*new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tu(maskedoff_tuple, base, new_vl, vl); +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tu(vd, rs1, new_vl, vl); } -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, 
new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tum(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return 
__riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_tumu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, 
size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, size_t *new_vl, size_t vl) { - return __riscv_vlseg8e8ff_mu(mask, maskedoff_tuple, base, new_vl, vl); +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { + return __riscv_vlseg8e8ff_mu(vm, vd, rs1, new_vl, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlseg8e8ff\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e16.c index 9421d21ae..ea2b1b987 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e16.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m4x2_t 
test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg2e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t mask, 
vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x2_t 
test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e16\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e32.c
index 26c0705a6..6fb9a7501 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e32.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
+vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl);
+vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const
int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m4x2_t 
test_vlsseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return 
__riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t 
test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t 
rs2, size_t vl) {
+  return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e32_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e32\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e64.c
index aab7ae8e3..09c2d616e 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e64.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return
__riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tu(maskedoff_tuple, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } 
-vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t 
*rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, 
bstride, vl);
+vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e64_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e64\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e8.c
index d3cc10299..b26614877 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg2e8.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, 
const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, 
bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); 
+vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); 
+vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, 
const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg2e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e8_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg2e8\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e16.c
index 4e766067f..74e195ce8 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e16.c
@@ -6,196 +6,196 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
 }

-vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+
return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t 
test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, 
maskedoff_tuple, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t 
maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e16\.[ivxfswum.]+\s+} 48 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e32.c index e6af2cce7..e54942255 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e32.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t 
test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, 
const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e32\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e64.c index a5fdb2a0c..bd775dc8e 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e64.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const 
float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tu(maskedoff_tuple, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m2x3_t 
test_vlsseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e64\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e8.c index 827a0f7db..d217cacc2 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg3e8.c @@ -6,164 +6,164 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t 
test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, 
vl); } -vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t 
test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_tumu(vm, vd, rs1, rs2, vl); } 
-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t mask, 
vuint8m2x3_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg3e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg3e8\.[ivxfswum.]+\s+} 40 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e16.c index f16e7c426..c6a1e196d 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e16.c @@ -6,196 +6,196 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl); +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t 
-  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e16\.[ivxfswum.]+\s+} 48 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e32.c
index 39638b0fe..5a9cd9214 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e32.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e32_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e32\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e64.c
index 78baa6be9..08235baf6 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e64.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tu(vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tu(vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tu(vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e64_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e64\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e8.c
index f7f3f6915..b052c1aab 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg4e8.c
@@ -6,164 +6,164 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg4e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg4e8_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg4e8\.[ivxfswum.]+\s+} 40 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e16.c
index c40fb4068..41b8c9f51 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e16.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg5e16_tum(mask,
maskedoff_tuple, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t 
test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e32.c index 32d1b4eb2..2c2d61fec 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(mask, 
maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, 
const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e64.c index 5d7538fae..f25d7d749 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg5e64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e8.c index f3577bbfb..97290b46c 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg5e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t 
test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t 
maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg5e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg5e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e16.c index fb93b070c..2924f1785 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, 
size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t 
bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, 
rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t 
vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e32.c index 80c21d0b3..221b0950f 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); 
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return 
+vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e32_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e64.c
index 246abc05f..29ed262b3 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e64.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e64_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e8.c
index da97f91ad..bea0b6d4e 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg6e8.c
@@ -6,132 +6,132 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg6e8_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg6e8_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg6e8\.[ivxfswum.]+\s+} 32 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e16.c
index 12c4defdc..5c2f0eb37 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e16.c
@@ -6,148 +6,148 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tu(maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e16_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e16\.[ivxfswum.]+\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e32.c
index 7b482accd..3e4b14c08 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e32.c
@@ -6,100 +6,100 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tu(maskedoff_tuple, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e32_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e32_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e32\.[ivxfswum.]+\s+} 24 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e64.c
index 961dcefd7..4bedb45d7 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e64.c
@@ -6,52 +6,52 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tu(maskedoff_tuple, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e64_mu(mask, maskedoff_tuple, base, bstride, vl);
+vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e64_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e64\.[ivxfswum.]+\s+} 12 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e8.c
index 9ffc76b8b..e5894c9f8 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg7e8.c
@@ -6,132 +6,132 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tu(maskedoff_tuple, base, bstride, vl);
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tum(mask, maskedoff_tuple, base, bstride, vl);
+vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl);
+vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) {
vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t 
maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg7e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg7e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e16.c index 985c753ef..04c74c62e 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e16.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t 
test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tu(maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, 
ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, 
vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf4x8_t 
test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e16_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e16\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e32.c index 8a7094474..765fcdd4d 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e32.c @@ -6,100 +6,100 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, 
const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tu(maskedoff_tuple, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - 
return __riscv_vlsseg8e32_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t 
test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e32_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e32\.[ivxfswum.]+\s+} 24 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e64.c index bb7926715..38628714a 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e64.c @@ -6,52 +6,52 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tu(maskedoff_tuple, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e64_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e64\.[ivxfswum.]+\s+} 12 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e8.c 
b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e8.c index 898bd4dc0..0cef863c1 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vlsseg8e8.c @@ -6,132 +6,132 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tu(maskedoff_tuple, base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t 
test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tum(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t 
test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_tumu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t 
test_vlsseg8e8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return __riscv_vlsseg8e8_mu(mask, maskedoff_tuple, base, bstride, vl); +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vlsseg8e8\.[ivxfswum.]+\s+} 32 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei16.c index 8f32fb07b..19b6e6341 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei16.c @@ -6,916 +6,916 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat16m2_t 
test_vluxei16_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat64m4_t 
test_vluxei16_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, 
vl); +vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return 
__riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t maskedoff, 
const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return 
__riscv_vluxei16_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t mask, vfloat32m4_t 
maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t vm, 
vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t 
vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, 
rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, 
vl); +vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); } 
-vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t 
maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t 
test_vluxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t 
mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t 
test_vluxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } 
-vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return 
__riscv_vluxei16_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint16m8_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, 
vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - 
return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxei16_mu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei16\.[ivxfswum.]+\s+} 228 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei32.c
index 28579a8f0..b0fe1a6e1 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei32.c
@@ -6,836 +6,836 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tu(maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tum(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxei32_tumu(mask, maskedoff, base, bindex, vl);
+vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl)
{ + return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t 
*base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return 
__riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t 
test_vluxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t 
bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { + return 
__riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxei32_mu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei32\.[ivxfswum.]+\s+} 208 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei64.c index ee1e9e105..90b8ad607 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei64.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t 
vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint8m1_t 
test_vluxei64_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, 
rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, 
vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } 
-vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - 
return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } 
-vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t 
test_vluxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t 
test_vluxei64_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, 
size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const 
int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t 
*base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t 
vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t 
test_vluxei64_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, 
maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t mask, 
vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, 
base, bindex, vl); +vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxei64_mu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei64\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei8.c index be08fa92c..34aa0e71b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxei8.c @@ -6,948 +6,948 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + 
return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat64m4_t 
test_vluxei8_v_f64m4_tu(vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t vd, const int16_t 
*rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + 
return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t 
rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tu(maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return 
__riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, 
vl); } -vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return 
__riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf8_t 
test_vluxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, 
vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tum(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, 
vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, 
const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, 
bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t mask, vint64m8_t 
maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t 
rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t 
vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_tumu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, const float16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const float16_t *rs1, vuint8m4_t rs2, size_t vl) { + return 
__riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, const float32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, const float64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const float64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, 
maskedoff, base, bindex, vl); +vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return 
__riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, 
vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t 
mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { + 
return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxei8_mu(mask, maskedoff, base, bindex, vl); +vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxei8\.[ivxfswum.]+\s+} 236 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei16.c index 963eeac0f..6370cfde2 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei16.c @@ -6,772 +6,772 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + 
return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x2_t
test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t 
test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei16\.[ivxfswum.]+\s+} 192 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei32.c index b03c05c57..76a377219 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei32.c @@ -6,740 +6,740 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t 
test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, 
bindex, vl); +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, 
vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, 
vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); 
+vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - 
return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t 
test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const 
uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, 
size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t 
bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t 
vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, 
vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return 
__riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + 
return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) 
{ - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t 
test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t 
rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t 
test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t 
maskedoff_tuple, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei32\.[ivxfswum.]+\s+} 184 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei64.c index 1b1e473d3..0e0a4219b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei64.c @@ -6,660 +6,660 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd, 
const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const 
int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, 
rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t 
test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } 
-vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t 
test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, 
const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei64\.[ivxfswum.]+\s+} 164 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei8.c
index d6159c8b7..c724ca70b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg2ei8.c
@@ -6,772 +6,772 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const
uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { 
- return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, 
size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); 
+vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const 
int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, 
rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t mask, vfloat16mf4x2_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t mask, vfloat16mf2x2_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t mask, vfloat16m1x2_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t mask, vfloat16m2x2_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t mask, vfloat16m4x2_t maskedoff_tuple, const float16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const float16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t 
test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t mask, vfloat32mf2x2_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t mask, vfloat32m1x2_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t mask, vfloat32m2x2_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t mask, vfloat32m4x2_t maskedoff_tuple, const float32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t mask, vfloat64m1x2_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t mask, vfloat64m2x2_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t mask, vfloat64m4x2_t maskedoff_tuple, const float64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const float64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t mask, vint8mf8x2_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t mask, vint8mf4x2_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t 
vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t mask, vint8mf2x2_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t mask, vint8m1x2_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t mask, vint8m2x2_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t mask, vint8m4x2_t maskedoff_tuple, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t mask, vint16mf4x2_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t mask, vint16mf2x2_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t mask, vint16m1x2_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t mask, vint16m2x2_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t mask, vint16m4x2_t maskedoff_tuple, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t vm, 
vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t mask, vint32mf2x2_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t mask, vint32m1x2_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t mask, vint32m2x2_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t mask, vint32m4x2_t maskedoff_tuple, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t mask, vint64m1x2_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t mask, vint64m2x2_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t mask, vint64m4x2_t maskedoff_tuple, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t mask, vuint8mf8x2_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t mask, vuint8mf4x2_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t mask, vuint8mf2x2_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t mask, vuint8m1x2_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t mask, vuint8m2x2_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t mask, vuint8m4x2_t maskedoff_tuple, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t mask, vuint16mf4x2_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t mask, vuint16mf2x2_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t mask, vuint16m1x2_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t mask, vuint16m2x2_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t mask, vuint16m4x2_t maskedoff_tuple, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - 
return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t mask, vuint32mf2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t mask, vuint32m1x2_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t mask, vuint32m2x2_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t mask, vuint32m4x2_t maskedoff_tuple, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t mask, vuint64m1x2_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t mask, vuint64m2x2_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t mask, vuint64m4x2_t maskedoff_tuple, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg2ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg2ei8\.[ivxfswum.]+\s+} 192 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei16.c index 8b75ffdaa..48a7cdfa5 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei16.c +++ 
b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei16.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei16\.[ivxfswum.]+\s+} 148 } } */
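Note on the hunk above: like the rest of this patch it is a pure parameter rename in auto-generated tests. The descriptive names maskedoff_tuple/base/bindex/mask become the register-style names vd/rs1/rs2/vm used by the intrinsic specification's prototypes; return types, argument types, and the overloaded policy calls are untouched, which is why the scan-assembler-times count stays at 148. A minimal sketch of the renamed shape, assuming an RVV toolchain with <riscv_vector.h>; the function name gather3_tu and its comments are illustrative, not part of the generated suite:

    #include <riscv_vector.h>
    #include <stdint.h>

    /* Hypothetical caller written in the new convention: vd is the
       tail-undisturbed destination tuple, rs1 the base address, rs2 the
       vector of 16-bit byte offsets, vl the element count. The overloaded
       __riscv_vluxseg3ei16_tu call is exactly what the tests exercise. */
    vint8mf8x3_t gather3_tu(vint8mf8x3_t vd, const int8_t *rs1,
                            vuint16mf4_t rs2, size_t vl) {
      return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl);
    }
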
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei32.c
index 1487d2c13..d86fa7812 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei32.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, 
const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, 
vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, 
size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, 
vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } 
-vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, 
vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, 
bindex, vl);
+vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei32\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei64.c
index 702c24d0b..5e1859ed6 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei64.c
@@ -6,564 +6,564 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei64_tu(vd, rs1,
rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, 
const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t 
test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t 
test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, 
const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { 
- return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + 
return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, 
vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) 
{ + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t mask, 
vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei64\.[ivxfswum.]+\s+} 140 } } */ diff --git 
a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei8.c index 3b01036ec..523d34499 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg3ei8.c @@ -6,596 +6,596 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t maskedoff_tuple, const int32_t *base, 
vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t 
test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t 
test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t 
test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t 
test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const 
uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t mask, vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, 
vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t 
test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x3_t 
test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t mask, vfloat16mf4x3_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t mask, vfloat16mf2x3_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t mask, vfloat16m1x3_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t mask, vfloat16m2x3_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t mask, 
vfloat32mf2x3_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t mask, vfloat32m1x3_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t mask, vfloat32m2x3_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t mask, vfloat64m1x3_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t mask, vfloat64m2x3_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t mask, vint8mf8x3_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t mask, vint8mf4x3_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t mask, vint8mf2x3_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t mask, vint8m1x3_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return 
__riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t mask, vint8m2x3_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t mask, vint16mf4x3_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t mask, vint16mf2x3_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t mask, vint16m1x3_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t mask, vint16m2x3_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t mask, vint32mf2x3_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t mask, vint32m1x3_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t mask, vint32m2x3_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t mask, vint64m1x3_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const 
int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t mask, vint64m2x3_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t mask, vuint8mf8x3_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t mask, vuint8mf4x3_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t mask, vuint8mf2x3_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t mask, vuint8m1x3_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t mask, vuint8m2x3_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t mask, vuint16mf4x3_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t mask, vuint16mf2x3_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t mask, vuint16m1x3_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t mask, vuint16m2x3_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t mask, vuint32mf2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t mask, vuint32m1x3_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t mask, vuint32m2x3_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t mask, vuint64m1x3_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t mask, vuint64m2x3_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg3ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg3ei8\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei16.c
index bded54c9d..e2bc4daa2 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei16.c
@@ -6,596 +6,596 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return
__riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, 
size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); } -vint32m2x4_t 
test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei16\.[ivxfswum.]+\s+} 148 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei32.c
index 788d4a8b7..838623505 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei32.c
@@ -6,596 +6,596 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
 }
-vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t vm, 
vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, 
const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t 
test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t 
maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t 
*rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, 
vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const 
float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t 
maskedoff_tuple, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei32\.[ivxfswum.]+\s+} 148 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei64.c index 7dbc6f52a..7ab325dfd 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei64.c @@ -6,564 +6,564 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t 
maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t 
test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, 
size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, 
rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, 
rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, 
const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, 
size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, 
vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, 
const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t 
vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t 
vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, 
vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, 
const int32_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei64\.[ivxfswum.]+\s+} 140 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei8.c
index 49950ced5..0b5ecb408 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg4ei8.c
@@ -6,596 +6,596 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x4_t
test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t 
test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t 
test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, 
rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - 
return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t mask, 
vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, 
vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t mask, vfloat64m2x4_t 
maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return 
__riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); 
+vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t mask, vuint32mf2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t mask, 
vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t mask, vfloat16mf4x4_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t mask, vfloat16mf2x4_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t mask, vfloat16m1x4_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t mask, vfloat16m2x4_t maskedoff_tuple, const float16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const float16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t mask, vfloat32mf2x4_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t mask, vfloat32m1x4_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t 
vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t mask, vfloat32m2x4_t maskedoff_tuple, const float32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t mask, vfloat64m1x4_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t mask, vfloat64m2x4_t maskedoff_tuple, const float64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const float64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t mask, vint8mf8x4_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t mask, vint8mf4x4_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t mask, vint8mf2x4_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t mask, vint8m1x4_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t mask, vint8m2x4_t maskedoff_tuple, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t mask, vint16mf4x4_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x4_t 
test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t mask, vint16mf2x4_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t mask, vint16m1x4_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t mask, vint16m2x4_t maskedoff_tuple, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t mask, vint32mf2x4_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t mask, vint32m1x4_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t mask, vint32m2x4_t maskedoff_tuple, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t mask, vint64m1x4_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t mask, vint64m2x4_t maskedoff_tuple, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t mask, vuint8mf8x4_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t mask, vuint8mf4x4_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t mask, vuint8mf2x4_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t mask, vuint8m1x4_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t mask, vuint8m2x4_t maskedoff_tuple, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t mask, vuint16mf4x4_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t mask, vuint16mf2x4_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t mask, vuint16m1x4_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t mask, vuint16m2x4_t maskedoff_tuple, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t mask, vuint32mf2x4_t 
maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t mask, vuint32m1x4_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t mask, vuint32m2x4_t maskedoff_tuple, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t mask, vuint64m1x4_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t mask, vuint64m2x4_t maskedoff_tuple, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg4ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg4ei8\.[ivxfswum.]+\s+} 148 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei16.c index 00f13bce0..ad915124c 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t 
test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t 
maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, 
vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t 
test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei32.c
index 165e7503d..736462c0b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei32.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei64.c
index 5671de03c..71abbd21a 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei64.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const
float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + 
return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, 
vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, 
maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t 
test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t 
test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } 
-vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t 
test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t 
test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t 
maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei64\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei8.c index e3a816b64..1f74627fd 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg5ei8.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t 
bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint16m1x5_t 
test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t 
rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, 
vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const 
uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, 
const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } 
-vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t mask, vuint16m1x5_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t mask, vfloat16mf4x5_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t mask, vfloat16mf2x5_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t mask, vfloat16m1x5_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t mask, vfloat32mf2x5_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t mask, vfloat32m1x5_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t mask, vfloat64m1x5_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t mask, vint8mf8x5_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t mask, vint8mf4x5_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t mask, vint8mf2x5_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t mask, vint8m1x5_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t mask, vint16mf4x5_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t mask, vint16mf2x5_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t mask, vint16m1x5_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t mask, vint32mf2x5_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return 
__riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t mask, vint32m1x5_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t mask, vint64m1x5_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t mask, vuint8mf8x5_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t mask, vuint8mf4x5_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t mask, vuint8mf2x5_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t mask, vuint8m1x5_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t mask, vuint16mf4x5_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t mask, vuint16mf2x5_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t mask, vuint16m1x5_t 
maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t mask, vuint32mf2x5_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t mask, vuint32m1x5_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
- return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }
-vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t mask, vuint64m1x5_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return __riscv_vluxseg5ei8_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg5ei8\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei16.c
index a0e12546f..9b254ae79 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei16.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) {
- return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) {
- return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl);
 }
-vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) {
- return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd,
const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - 
return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, 
rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t vm, 
vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { 
- return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, 
vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t 
bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint16mf2_t 
rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); 
+vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t 
maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei16\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei32.c index eaa136ad1..a97bbaf9f 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t 
test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t 
*base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + 
return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t 
test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t 
*base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + 
return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const 
float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, 
size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) 
{ - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, 
vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, 
vl); +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t 
maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei32_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei32\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei64.c index 37dd8be27..df07a1822 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei64.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei64.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t 
test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, 
vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t 
test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, 
vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t 
*base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + 
return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return 
__riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t 
vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t 
test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, 
vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei64\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei8.c
index 2cd831f60..9bc2f7694 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg6ei8.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t 
test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, 
size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t 
mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } 
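/* Note on the rename applied throughout these hunks: the auto-generated
   policy tests drop the descriptive parameter names (mask, maskedoff_tuple,
   base, bindex) in favor of the operand names used by the intrinsics
   specification (vm, vd, rs1, rs2); the types, argument order, and the
   dg-final assembler checks are left untouched. Below is a minimal usage
   sketch of one of the renamed overloaded intrinsics; the wrapper
   gather6u8 is hypothetical and not part of this patch:

   #include <riscv_vector.h>

   // Masked, tail-undisturbed/mask-undisturbed unordered indexed load of
   // six uint8 segment fields: vm selects the active elements, vd supplies
   // the values kept for inactive and tail elements, rs1 is the base
   // address, and rs2 holds the 8-bit byte offsets of each segment.
   vuint8m1x6_t gather6u8(vbool8_t vm, vuint8m1x6_t vd,
                          const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
     return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
   }
*/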
-vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, 
vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, 
vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t mask, vfloat16mf4x6_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t mask, vfloat16mf2x6_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t mask, vfloat16m1x6_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t mask, vfloat32mf2x6_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t mask, vfloat32m1x6_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t mask, vfloat64m1x6_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const float64_t *rs1, vuint8mf8_t rs2, 
size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t mask, vint8mf8x6_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t mask, vint8mf4x6_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t mask, vint8mf2x6_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t mask, vint8m1x6_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t mask, vint16mf4x6_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t mask, vint16mf2x6_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t mask, vint16m1x6_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t mask, vint32mf2x6_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t mask, vint32m1x6_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t vm, 
vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t mask, vint64m1x6_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t mask, vuint8mf8x6_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t mask, vuint8mf4x6_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t mask, vuint8mf2x6_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t mask, vuint8m1x6_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t mask, vuint16mf4x6_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t mask, vuint16mf2x6_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t mask, vuint16m1x6_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t mask, vuint32mf2x6_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t mask, vuint32m1x6_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t mask, vuint64m1x6_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg6ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg6ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei16.c index 5fe74d1d6..ce4885299 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t 
test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const 
int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t 
bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); 
+vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t 
test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, 
rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t 
test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const 
-  return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei16_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei16\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei32.c
index d00742a7d..93008c9fc 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei32.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei32.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei64.c
index 3984e33b9..41995b8db 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei64.c
@@ -6,420 +6,420 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
 }
 
-vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
bindex, vl); +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t 
test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return 
__riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } 
-vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, 
bindex, vl); +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); 
} -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, 
vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei64\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei8.c index 821488093..2b5186953 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg7ei8.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t 
test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t 
test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, 
vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t 
test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t vm, 
vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t mask, 
vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + 
return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return 
__riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t mask, vfloat16mf4x7_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t mask, vfloat16mf2x7_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t mask, vfloat16m1x7_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t mask, vfloat32mf2x7_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t mask, vfloat32m1x7_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t mask, vfloat64m1x7_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t mask, vint8mf8x7_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, 
vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t mask, vint8mf4x7_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t mask, vint8mf2x7_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t mask, vint8m1x7_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t mask, vint16mf4x7_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t mask, vint16mf2x7_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t mask, vint16m1x7_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t mask, vint32mf2x7_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t mask, vint32m1x7_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t mask, vint64m1x7_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + 
return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t mask, vuint8mf8x7_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t mask, vuint8mf4x7_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t mask, vuint8mf2x7_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t mask, vuint8m1x7_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t mask, vuint16mf4x7_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t mask, vuint16mf2x7_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t mask, vuint16m1x7_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t mask, vuint32mf2x7_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t mask, vuint32m1x7_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x7_t 
test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t mask, vuint64m1x7_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg7ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg7ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei16.c index a6083f05d..560536323 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei16.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei16.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } 
-vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tu(maskedoff_tuple, base, bindex, vl); 
+vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t 
test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, 
vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const 
uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t 
test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t mask, 
vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t 
test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t 
test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, 
vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei16_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei16\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei32.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei32.c index 81094ffa8..eece3bc03 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei32.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei32.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t 
test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei32_mu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei32\.[ivxfswum.]+\s+} 104 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei64.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei64.c
index b739656cf..48136aaf0 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei64.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei64.c
@@ -6,420 +6,420 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tu(maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tum(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_tumu(mask, maskedoff_tuple, base, bindex, vl);
+vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint64m4_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint64m4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint64m2_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint64m2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl);
+vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2,
vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t 
*rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return 
__riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei64_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei64\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei8.c index 6b1ec06b5..0ff1ed908 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vluxseg8ei8.c @@ -6,420 +6,420 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t 
maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } 
-vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tu(maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t 
vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t 
bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t 
test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tum(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t vm, 
vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t mask, vint8mf2x8_t 
maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_tumu(mask, 
maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t mask, vfloat16mf4x8_t maskedoff_tuple, const float16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const float16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t mask, vfloat16mf2x8_t maskedoff_tuple, const float16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const float16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t mask, vfloat16m1x8_t maskedoff_tuple, const float16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const float16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t mask, vfloat32mf2x8_t maskedoff_tuple, const float32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t mask, vfloat32m1x8_t maskedoff_tuple, const float32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t mask, vfloat64m1x8_t maskedoff_tuple, const float64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const float64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t mask, vint8mf8x8_t maskedoff_tuple, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t mask, vint8mf4x8_t maskedoff_tuple, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t 
test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t mask, vint8mf2x8_t maskedoff_tuple, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t mask, vint8m1x8_t maskedoff_tuple, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t mask, vint16mf4x8_t maskedoff_tuple, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t mask, vint16mf2x8_t maskedoff_tuple, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t mask, vint16m1x8_t maskedoff_tuple, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t mask, vint32mf2x8_t maskedoff_tuple, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t mask, vint32m1x8_t maskedoff_tuple, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t mask, vint64m1x8_t maskedoff_tuple, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t mask, vuint8mf8x8_t maskedoff_tuple, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { + return 
__riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t mask, vuint8mf4x8_t maskedoff_tuple, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t mask, vuint8mf2x8_t maskedoff_tuple, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t mask, vuint8m1x8_t maskedoff_tuple, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t mask, vuint16mf4x8_t maskedoff_tuple, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t mask, vuint16mf2x8_t maskedoff_tuple, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t mask, vuint16m1x8_t maskedoff_tuple, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t mask, vuint32mf2x8_t maskedoff_tuple, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t mask, vuint32m1x8_t maskedoff_tuple, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t mask, vuint64m1x8_t maskedoff_tuple, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return __riscv_vluxseg8ei8_mu(mask, maskedoff_tuple, base, bindex, vl); +vuint64m1x8_t 
test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { + return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vluxseg8ei8\.[ivxfswum.]+\s+} 104 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmacc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmacc.c index 9a9727ce8..deec31519 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmacc.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmacc.c @@ -358,1060 +358,1060 @@ vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2 return __riscv_vmacc_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t 
vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t 
vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, 
size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { 
+ return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, 
vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint16m1_t 
test_vmacc_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, 
vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } 
-vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tum(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, 
rs1, vs2, vl); } -vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return 
__riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, 
int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t 
test_vmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, 
size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t 
test_vmacc_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint32m1_t 
test_vmacc_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t mask, 
vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_tumu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t 
test_vmacc_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return 
__riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, 
vs2, vl); +vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint64m2_t 
test_vmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint8m1_t 
test_vmacc_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t 
test_vmacc_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return 
__riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, 
vuint64m2_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmacc_mu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmacc_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmadd.c index 2a886a43a..fe05b9d39 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmadd.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmadd.c @@ -358,1060 +358,1060 @@ vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2 return __riscv_vmadd_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint8mf2_t 
test_vmadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, 
size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, 
vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, 
vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t 
vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { 
- return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t 
test_vmadd_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tum(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + 
return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, 
vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, 
size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t 
test_vmadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - 
return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t 
vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, 
vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) 
{ - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vmadd_tumu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vmadd_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t 
test_vmadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, 
vs2, vl); +vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint16m8_t 
test_vmadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t vm, 
vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t 
vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return 
__riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t vm, 
vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vmadd_mu(mask, vd, rs1, vs2, 
vl);
+vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vmadd_mu(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vmadd_mu(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vma[c-d][c-d]\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmax.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmax.c
index 3a7c5557a..772d8b5ea 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmax.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmax.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf8_t
test_vmax_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmax_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmax_tu(maskedoff, op1, op2, vl); +vint8m8_t 
test_vmax_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tu(vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmax_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmax_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vmax\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmaxu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmaxu.c
index 7a489ea37..410d760c7 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmaxu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmaxu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmaxu_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmaxu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t
op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); 
+vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t 
vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t 
maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); 
+vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, 
vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t 
maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t 
test_vmaxu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, 
op1, op2, vl); +vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmaxu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmaxu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmaxu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmerge.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmerge.c index 6efb8dc0d..fd62dc8b2 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmerge.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmerge.c @@ -6,416 +6,416 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8mf2_t 
test_vmerge_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t 
mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl); } -vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); } -vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) { - return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl); +vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t 
rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, rs1, v0, vl);
 }

-vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

-vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
-  return __riscv_vmerge_tu(maskedoff, op1, op2, mask, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
 }

 /* { dg-final { scan-assembler-times {vmerge\.[ivxfswum.]+\s+} 103 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfeq.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfeq.c
index bf232e504..8650ad69d 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfeq.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfeq.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfeq_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfeq\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfge.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfge.c
index 3dba0de55..6a24eed26 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfge.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfge.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfge_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfge\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfgt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfgt.c
index 3afaa9159..38e723080 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfgt.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfgt.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfgt_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfgt\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfle.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfle.c
index 5aabf3cb2..0325867eb 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfle.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfle.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) {
-  return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) {
+  return __riscv_vmfle_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfle\.[ivxfswum.]+\s+} 30 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmflt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmflt.c
index fab8aef5c..ff700af8b 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmflt.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmflt.c
@@ -6,124 +6,124 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmflt_mu(vm, vd, vs2, vs1,
vl); } -vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - 
return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, 
vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmflt_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmflt\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfne.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfne.c index 9d3d84c39..601ee5ae7 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmfne.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmfne.c @@ -6,124 +6,124 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, 
op2, vl); +vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { + 
return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, float16_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, float16_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, 
vfloat32m8_t op1, float32_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float32_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, float64_t op2, size_t vl) { - return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, float64_t rs1, size_t vl) { + return __riscv_vmfne_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmfne\.[ivxfswum.]+\s+} 30 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmin.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmin.c index 6623aa80f..1412d973a 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmin.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmin.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t 
test_vmin_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m8_t 
test_vmin_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t 
vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } 
-vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t 
test_vmin_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, 
op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t 
test_vmin_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t vm, 
vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, 
size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t 
mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t 
test_vmin_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t 
test_vmin_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmin_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmin_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t 
test_vmin_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t 
test_vmin_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, 
size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmin_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t 
test_vmin_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmin_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmin_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vmin\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vminu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vminu.c
index bf87e4472..09458c0c1 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vminu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vminu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vminu_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vminu_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1,
uint8_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } 
-vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, 
vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, 
vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } 
-vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, 
uint32_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t 
test_vminu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t 
test_vminu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t 
maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return 
__riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, 
vuint8m1_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) 
{ - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, 
vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t mask, 
vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vminu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vminu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vminu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsbf.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsbf.c index bf0a6e620..ab717a75b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsbf.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsbf.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsbf_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_vmsbf_mu(mask, maskedoff, op1, vl); +vbool1_t test_vmsbf_m_b1_mu(vbool1_t vm, vbool1_t vd, vbool1_t vs2, size_t vl) { + return __riscv_vmsbf_mu(vm, vd, vs2, vl); } -vbool2_t test_vmsbf_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_vmsbf_mu(mask, maskedoff, op1, vl); +vbool2_t test_vmsbf_m_b2_mu(vbool2_t vm, vbool2_t vd, vbool2_t vs2, size_t vl) { + return __riscv_vmsbf_mu(vm, vd, vs2, vl); } -vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_vmsbf_mu(mask, maskedoff, op1, vl); +vbool4_t test_vmsbf_m_b4_mu(vbool4_t vm, vbool4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_vmsbf_mu(vm, vd, vs2, vl); } -vbool8_t test_vmsbf_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_vmsbf_mu(mask, maskedoff, op1, vl); +vbool8_t test_vmsbf_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_vmsbf_mu(vm, vd, vs2, vl); } -vbool16_t 
test_vmsbf_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_vmsbf_mu(mask, maskedoff, op1, vl); +vbool16_t test_vmsbf_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) { + return __riscv_vmsbf_mu(vm, vd, vs2, vl); } -vbool32_t test_vmsbf_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_vmsbf_mu(mask, maskedoff, op1, vl); +vbool32_t test_vmsbf_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) { + return __riscv_vmsbf_mu(vm, vd, vs2, vl); } -vbool64_t test_vmsbf_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_vmsbf_mu(mask, maskedoff, op1, vl); +vbool64_t test_vmsbf_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) { + return __riscv_vmsbf_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsbf\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmseq.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmseq.c index 3f0edea2e..ed5ca2009 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmseq.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmseq.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, 
vl); +vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, 
size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, 
rs1, vl); } -vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t 
test_vmseq_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t 
test_vmseq_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, 
uint16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, 
vuint32mf2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t 
vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmseq_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmseq\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsge.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsge.c index 1d973a28c..98ca3ffca 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsge.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsge.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t 
vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, 
size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, 
vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsge_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgeu.c 
b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgeu.c index 235f919ff..8fbcf5b8f 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgeu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgeu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); 
+vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, 
vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - 
return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t 
vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgeu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgt.c index 285e628a4..ce4d365af 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgt.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgt.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t 
vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - 
return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } 
-vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsgt_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsgt\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgtu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgtu.c index 374441307..d8ae23a49 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgtu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsgtu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t 
mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, 
uint8_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool2_t 
test_vmsgtu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return 
__riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsgtu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsgtu\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsif.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsif.c index 96aa3f58d..c8e9b72f4 100644 --- 
a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsif.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsif.c
@@ -6,32 +6,32 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool1_t test_vmsif_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) {
-  return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
+vbool1_t test_vmsif_m_b1_mu(vbool1_t vm, vbool1_t vd, vbool1_t vs2, size_t vl) {
+  return __riscv_vmsif_mu(vm, vd, vs2, vl);
 }

-vbool2_t test_vmsif_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) {
-  return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
+vbool2_t test_vmsif_m_b2_mu(vbool2_t vm, vbool2_t vd, vbool2_t vs2, size_t vl) {
+  return __riscv_vmsif_mu(vm, vd, vs2, vl);
 }

-vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) {
-  return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
+vbool4_t test_vmsif_m_b4_mu(vbool4_t vm, vbool4_t vd, vbool4_t vs2, size_t vl) {
+  return __riscv_vmsif_mu(vm, vd, vs2, vl);
 }

-vbool8_t test_vmsif_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) {
-  return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
+vbool8_t test_vmsif_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) {
+  return __riscv_vmsif_mu(vm, vd, vs2, vl);
 }

-vbool16_t test_vmsif_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) {
-  return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
+vbool16_t test_vmsif_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) {
+  return __riscv_vmsif_mu(vm, vd, vs2, vl);
 }

-vbool32_t test_vmsif_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) {
-  return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
+vbool32_t test_vmsif_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) {
+  return __riscv_vmsif_mu(vm, vd, vs2, vl);
 }

-vbool64_t test_vmsif_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) {
-  return __riscv_vmsif_mu(mask, maskedoff, op1, vl);
+vbool64_t test_vmsif_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) {
+  return __riscv_vmsif_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsif\.[ivxfswum.]+\s+} 7 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsle.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsle.c
index 973c234ff..ed73faf08 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsle.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsle.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmsle_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmsle_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmsle\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsleu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsleu.c
index dc84a551d..90c89a0c2 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsleu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsleu.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsleu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsleu_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vmsleu\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmslt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmslt.c
index 96e7737b7..3d5fd3db6 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmslt.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmslt.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmslt_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmslt_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vms[gl][et]\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsltu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsltu.c
index 9ec24bef8..bbe9d0eb4 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsltu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsltu.c
@@ -6,180 +6,180 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmsltu_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmsltu_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vms[gl][et]u\.[ivxfswum.]+\s+} 44 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsne.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsne.c
index e0ebb66a0..77cb0ca26 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsne.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsne.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl);
 }

-vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl);
 }

-vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmsne_mu(mask,
maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t 
test_vmsne_vv_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t vm, 
vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t 
op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + 
return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return 
__riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return 
__riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_mu(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmsne_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vmsne\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsof.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsof.c index 1b01dc961..04eb19ead 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmsof.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmsof.c @@ -6,32 +6,32 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vbool1_t test_vmsof_m_b1_mu(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { - return __riscv_vmsof_mu(mask, maskedoff, op1, vl); +vbool1_t test_vmsof_m_b1_mu(vbool1_t vm, vbool1_t vd, vbool1_t vs2, size_t vl) { + return __riscv_vmsof_mu(vm, vd, vs2, vl); } -vbool2_t test_vmsof_m_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { - return __riscv_vmsof_mu(mask, maskedoff, op1, vl); +vbool2_t test_vmsof_m_b2_mu(vbool2_t vm, vbool2_t vd, vbool2_t vs2, size_t vl) { + return __riscv_vmsof_mu(vm, vd, vs2, vl); } -vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { - return __riscv_vmsof_mu(mask, maskedoff, op1, vl); +vbool4_t test_vmsof_m_b4_mu(vbool4_t vm, vbool4_t vd, vbool4_t vs2, size_t vl) { + return __riscv_vmsof_mu(vm, vd, vs2, vl); } -vbool8_t test_vmsof_m_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { - return __riscv_vmsof_mu(mask, maskedoff, op1, vl); +vbool8_t test_vmsof_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) { + return __riscv_vmsof_mu(vm, vd, vs2, vl); } -vbool16_t test_vmsof_m_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, size_t vl) { - return __riscv_vmsof_mu(mask, maskedoff, op1, vl); +vbool16_t test_vmsof_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) { + return __riscv_vmsof_mu(vm, vd, vs2, vl); } -vbool32_t test_vmsof_m_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, size_t vl) { - return __riscv_vmsof_mu(mask, maskedoff, op1, vl); +vbool32_t test_vmsof_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) { + return __riscv_vmsof_mu(vm, vd, vs2, vl); } -vbool64_t test_vmsof_m_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, size_t vl) { - return __riscv_vmsof_mu(mask, maskedoff, op1, vl); +vbool64_t test_vmsof_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) { + return __riscv_vmsof_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times 
{vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmsof\.[ivxfswum.]+\s+} 7 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmul.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmul.c index 87fe03d2f..3ea591f4b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmul.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmul.c @@ -6,1412 +6,1412 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, 
vint8m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } 
-vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t 
op1, int32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16mf4_t 
test_vmul_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t 
vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); 
+vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - 
return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } 
-vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t vm, 
vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, 
vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, 
vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t vm, 
vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t 
mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, 
vl); } -vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return 
__riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return 
__riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t mask, 
vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return 
__riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t 
vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t 
test_vmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return 
__riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t mask, 
vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, 
vl); } -vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t vm, 
vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, 
uint16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t 
rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmul_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - 
return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmul_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmul_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmul_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmul_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vmul\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmulh.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmulh.c
index 5e731f95d..c7ee4c2fb 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmulh.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmulh.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulh_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulh_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_tu(vd, vs2, rs1, vl);
 }
 
-vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return
__riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m1_t 
test_vmulh_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m2_t 
test_vmulh_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t vd, 
vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); 
+vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t 
test_vmulh_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t 
test_vmulh_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t 
test_vmulh_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmulh_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vmulh_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t 
test_vmulh_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vmulh_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vmulh_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulh\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhsu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhsu.c
index f4b1a9b13..25032dd85 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhsu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhsu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tu(vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, vs1, vl);
 }

-vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl);
 }

-vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t vm,
vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhsu_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t 
mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t 
test_vmulhsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t 
op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmulhsu_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhsu_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhsu_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhsu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhu.c
index b82932a12..c1456f8f3 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmulhu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulhu_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulhu_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulhu_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t
test_vmulhu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, 
vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m4_t 
test_vmulhu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, 
vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); 
+vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t vm, 
vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmulhu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vmulhu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t 
test_vmulhu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - 
return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - 
return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return 
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vmulhu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vmulhu_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vmulhu\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vmv.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vmv.c
index a5509687c..df4d02738 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vmv.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vmv.c
@@ -6,592 +6,592 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8mf8_t test_vmv_v_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8mf8_t test_vmv_v_x_i8mf8_tu(vint8mf8_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint8mf4_t test_vmv_v_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8mf4_t test_vmv_v_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8mf4_t test_vmv_v_x_i8mf4_tu(vint8mf4_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint8mf2_t test_vmv_v_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8mf2_t test_vmv_v_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8mf2_t test_vmv_v_x_i8mf2_tu(vint8mf2_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m1_t test_vmv_v_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m1_t test_vmv_v_x_i8m1_tu(vint8m1_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m2_t test_vmv_v_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m2_t test_vmv_v_x_i8m2_tu(vint8m2_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m4_t test_vmv_v_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m4_t test_vmv_v_x_i8m4_tu(vint8m4_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m8_t test_vmv_v_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8m8_t test_vmv_v_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint8m8_t test_vmv_v_x_i8m8_tu(vint8m8_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16mf4_t test_vmv_v_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16mf4_t test_vmv_v_x_i16mf4_tu(vint16mf4_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16mf2_t test_vmv_v_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint16mf2_t test_vmv_v_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16mf2_t test_vmv_v_x_i16mf2_tu(vint16mf2_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m1_t test_vmv_v_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m1_t test_vmv_v_x_i16m1_tu(vint16m1_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m2_t test_vmv_v_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m2_t test_vmv_v_x_i16m2_tu(vint16m2_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m4_t test_vmv_v_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m4_t test_vmv_v_x_i16m4_tu(vint16m4_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m8_t test_vmv_v_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint16m8_t test_vmv_v_x_i16m8_tu(vint16m8_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m1_t test_vmv_v_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m1_t test_vmv_v_x_i32m1_tu(vint32m1_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m2_t test_vmv_v_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m2_t test_vmv_v_x_i32m2_tu(vint32m2_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m4_t test_vmv_v_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m4_t test_vmv_v_x_i32m4_tu(vint32m4_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m8_t test_vmv_v_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint32m8_t test_vmv_v_x_i32m8_tu(vint32m8_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m1_t test_vmv_v_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m1_t test_vmv_v_x_i64m1_tu(vint64m1_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m2_t test_vmv_v_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m2_t test_vmv_v_x_i64m2_tu(vint64m2_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m4_t test_vmv_v_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m4_t test_vmv_v_x_i64m4_tu(vint64m4_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m8_t test_vmv_v_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vint64m8_t test_vmv_v_x_i64m8_tu(vint64m8_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8mf8_t test_vmv_v_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8mf8_t test_vmv_v_x_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8mf4_t test_vmv_v_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8mf4_t test_vmv_v_x_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8mf2_t test_vmv_v_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8mf2_t test_vmv_v_x_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint8m1_t test_vmv_v_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m1_t test_vmv_v_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m1_t test_vmv_v_x_u8m1_tu(vuint8m1_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m2_t test_vmv_v_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m2_t test_vmv_v_x_u8m2_tu(vuint8m2_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m4_t test_vmv_v_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m4_t test_vmv_v_x_u8m4_tu(vuint8m4_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m8_t test_vmv_v_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16mf2_t test_vmv_v_x_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m1_t test_vmv_v_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m1_t test_vmv_v_x_u16m1_tu(vuint16m1_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m2_t test_vmv_v_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m2_t test_vmv_v_x_u16m2_tu(vuint16m2_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m4_t test_vmv_v_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m4_t test_vmv_v_x_u16m4_tu(vuint16m4_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m8_t test_vmv_v_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m1_t test_vmv_v_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m1_t test_vmv_v_x_u32m1_tu(vuint32m1_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m2_t test_vmv_v_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m2_t test_vmv_v_x_u32m2_tu(vuint32m2_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m4_t test_vmv_v_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m4_t test_vmv_v_x_u32m4_tu(vuint32m4_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m8_t test_vmv_v_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint32m8_t test_vmv_v_x_u32m8_tu(vuint32m8_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m1_t test_vmv_v_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m1_t test_vmv_v_x_u64m1_tu(vuint64m1_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m2_t test_vmv_v_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint64m2_t test_vmv_v_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m2_t test_vmv_v_x_u64m2_tu(vuint64m2_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m4_t test_vmv_v_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m4_t test_vmv_v_x_u64m4_tu(vuint64m4_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m8_t test_vmv_v_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, rs1, vl);
 }
-vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vmv_v_tu(maskedoff, src, vl);
+vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
 }
-vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint8mf8_t test_vmv_s_x_i8mf8_tu(vint8mf8_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint8mf4_t test_vmv_s_x_i8mf4_tu(vint8mf4_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint8mf2_t test_vmv_s_x_i8mf2_tu(vint8mf2_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint8m1_t test_vmv_s_x_i8m1_tu(vint8m1_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint8m2_t test_vmv_s_x_i8m2_tu(vint8m2_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint8m4_t test_vmv_s_x_i8m4_tu(vint8m4_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint8m8_t test_vmv_s_x_i8m8_tu(vint8m8_t maskedoff, int8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint8m8_t test_vmv_s_x_i8m8_tu(vint8m8_t vd, int8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint16mf4_t test_vmv_s_x_i16mf4_tu(vint16mf4_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint16mf2_t test_vmv_s_x_i16mf2_tu(vint16mf2_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint16m1_t test_vmv_s_x_i16m1_tu(vint16m1_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint16m2_t test_vmv_s_x_i16m2_tu(vint16m2_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint16m4_t test_vmv_s_x_i16m4_tu(vint16m4_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint16m8_t test_vmv_s_x_i16m8_tu(vint16m8_t maskedoff, int16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint16m8_t test_vmv_s_x_i16m8_tu(vint16m8_t vd, int16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint32m1_t test_vmv_s_x_i32m1_tu(vint32m1_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint32m2_t test_vmv_s_x_i32m2_tu(vint32m2_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint32m4_t test_vmv_s_x_i32m4_tu(vint32m4_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t maskedoff, int32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint32m8_t test_vmv_s_x_i32m8_tu(vint32m8_t vd, int32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint64m1_t test_vmv_s_x_i64m1_tu(vint64m1_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint64m2_t test_vmv_s_x_i64m2_tu(vint64m2_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint64m4_t test_vmv_s_x_i64m4_tu(vint64m4_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint64m4_t test_vmv_s_x_i64m4_tu(vint64m4_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t maskedoff, int64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vint64m8_t test_vmv_s_x_i64m8_tu(vint64m8_t vd, int64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint8mf8_t test_vmv_s_x_u8mf8_tu(vuint8mf8_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint8mf8_t test_vmv_s_x_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint8mf4_t test_vmv_s_x_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint8mf2_t test_vmv_s_x_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint8m1_t test_vmv_s_x_u8m1_tu(vuint8m1_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint8m2_t test_vmv_s_x_u8m2_tu(vuint8m2_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint8m4_t test_vmv_s_x_u8m4_tu(vuint8m4_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t maskedoff, uint8_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint8m8_t test_vmv_s_x_u8m8_tu(vuint8m8_t vd, uint8_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint16mf4_t test_vmv_s_x_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint16mf2_t test_vmv_s_x_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint16m1_t test_vmv_s_x_u16m1_tu(vuint16m1_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint16m2_t test_vmv_s_x_u16m2_tu(vuint16m2_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint16m4_t test_vmv_s_x_u16m4_tu(vuint16m4_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t maskedoff, uint16_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint16m8_t test_vmv_s_x_u16m8_tu(vuint16m8_t vd, uint16_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint32m1_t test_vmv_s_x_u32m1_tu(vuint32m1_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint32m2_t test_vmv_s_x_u32m2_tu(vuint32m2_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint32m4_t test_vmv_s_x_u32m4_tu(vuint32m4_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t maskedoff, uint32_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint32m8_t test_vmv_s_x_u32m8_tu(vuint32m8_t vd, uint32_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint64m1_t test_vmv_s_x_u64m1_tu(vuint64m1_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint64m2_t test_vmv_s_x_u64m2_tu(vuint64m2_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint64m4_t test_vmv_s_x_u64m4_tu(vuint64m4_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
-vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t maskedoff, uint64_t src, size_t vl) {
-  return __riscv_vmv_s_tu(maskedoff, src, vl);
+vuint64m8_t test_vmv_s_x_u64m8_tu(vuint64m8_t vd, uint64_t rs1, size_t vl) {
+  return __riscv_vmv_s_tu(vd, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {v[ml][s]*[ve][0-9]*\.[ivxfswum.]+\s+} 201 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vnclip.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vnclip.c
index 4eec784eb..2fa5f3de8 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vnclip.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vnclip.c
@@ -6,484 +6,484 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
__riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { 
+ return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t 
test_vnclip_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, 
__RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t 
vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t 
test_vnclip_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - 
return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); 
+vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t mask, 
vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t 
test_vnclip_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclip_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclip_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclip\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vnclipu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vnclipu.c index 758b5f371..20a5f38db 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vnclipu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vnclipu.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, 
vuint8mf2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, 
vl); +vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t vd, 
vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return 
__riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnclipu_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnclipu_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnclipu\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vncvt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vncvt.c
index 19f560a06..b38621b48 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vncvt.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vncvt.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint8mf2_t test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint8mf2_t test_vncvt_x_x_w_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint8m1_t test_vncvt_x_x_w_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint8m2_t test_vncvt_x_x_w_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint8m1_t test_vncvt_x_x_w_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint8m2_t test_vncvt_x_x_w_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint16m1_t test_vncvt_x_x_w_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint16m2_t test_vncvt_x_x_w_i16m2_tu(vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint16m2_t test_vncvt_x_x_w_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint32m1_t test_vncvt_x_x_w_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint32m2_t test_vncvt_x_x_w_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tu(maskedoff, src, vl);
+vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tu(vd, vs2, vl);
 }
 
-vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tum(mask, maskedoff, src, vl);
+vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tum(vm, vd, vs2, vl);
 }
 
-vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) {
  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_tumu(mask, maskedoff, src, vl);
+vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_tumu(vm, vd, vs2, vl);
 }
 
-vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
-vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-  return __riscv_vncvt_x_mu(mask, maskedoff, src, vl);
+vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vncvt_x_mu(vm, vd, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vncvt\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vneg.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vneg.c
index c9a50d1b9..f01e8d4ad 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vneg.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vneg.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint8mf8_t test_vneg_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint8mf4_t test_vneg_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint8mf2_t test_vneg_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint8m1_t test_vneg_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint8m2_t test_vneg_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint8m4_t test_vneg_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint8m8_t test_vneg_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint16mf4_t test_vneg_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint16mf2_t test_vneg_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint16m1_t test_vneg_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint16m2_t test_vneg_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint16m4_t test_vneg_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint16m8_t test_vneg_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint32m1_t test_vneg_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vneg_tu(maskedoff, op1, vl);
+vint32m2_t test_vneg_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs, size_t vl) {
+  return __riscv_vneg_tu(vd, vs, vl);
 }
 
-vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t
maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vneg_tu(maskedoff, op1, vl); +vint32m4_t test_vneg_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vneg_tu(vd, vs, vl); } -vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vneg_tu(maskedoff, op1, vl); +vint32m8_t test_vneg_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vneg_tu(vd, vs, vl); } -vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vneg_tu(maskedoff, op1, vl); +vint64m1_t test_vneg_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vneg_tu(vd, vs, vl); } -vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vneg_tu(maskedoff, op1, vl); +vint64m2_t test_vneg_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vneg_tu(vd, vs, vl); } -vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vneg_tu(maskedoff, op1, vl); +vint64m4_t test_vneg_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vneg_tu(vd, vs, vl); } -vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vneg_tu(maskedoff, op1, vl); +vint64m8_t test_vneg_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vneg_tu(vd, vs, vl); } -vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint8m1_t test_vneg_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint8m1_t test_vneg_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint8m2_t test_vneg_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint8m2_t test_vneg_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint8m4_t test_vneg_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint8m4_t test_vneg_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint8m8_t test_vneg_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint8m8_t test_vneg_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return 
__riscv_vneg_tum(mask, maskedoff, op1, vl); +vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint16m1_t test_vneg_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint16m1_t test_vneg_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint16m2_t test_vneg_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint16m2_t test_vneg_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint16m4_t test_vneg_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint16m4_t test_vneg_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint16m8_t test_vneg_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint16m8_t test_vneg_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint32m1_t test_vneg_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint32m1_t test_vneg_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint32m2_t test_vneg_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint32m2_t test_vneg_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint32m4_t test_vneg_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint32m4_t test_vneg_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint32m8_t test_vneg_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint32m8_t test_vneg_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint64m1_t test_vneg_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint64m1_t test_vneg_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint64m2_t test_vneg_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint64m2_t 
test_vneg_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint64m4_t test_vneg_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint64m4_t test_vneg_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint64m8_t test_vneg_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vneg_tum(mask, maskedoff, op1, vl); +vint64m8_t test_vneg_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vneg_tum(vm, vd, vs, vl); } -vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t 
vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vneg_tumu(vm, vd, vs, vl); } -vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vneg_tumu(mask, maskedoff, op1, vl); +vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return 
__riscv_vneg_tumu(vm, vd, vs, vl); } -vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint8m1_t test_vneg_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint8m1_t test_vneg_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint8m2_t test_vneg_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint8m2_t test_vneg_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint8m4_t test_vneg_v_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint8m4_t test_vneg_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint8m8_t test_vneg_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint8m8_t test_vneg_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint16m1_t test_vneg_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint16m1_t test_vneg_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint16m2_t test_vneg_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint16m2_t test_vneg_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint16m4_t test_vneg_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint16m4_t test_vneg_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint16m8_t test_vneg_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return 
__riscv_vneg_mu(mask, maskedoff, op1, vl); +vint16m8_t test_vneg_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint32m1_t test_vneg_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint32m1_t test_vneg_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint32m2_t test_vneg_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint32m2_t test_vneg_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint32m4_t test_vneg_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint32m4_t test_vneg_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint32m8_t test_vneg_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint32m8_t test_vneg_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint64m1_t test_vneg_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint64m1_t test_vneg_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint64m2_t test_vneg_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint64m2_t test_vneg_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint64m4_t test_vneg_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint64m4_t test_vneg_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } -vint64m8_t test_vneg_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vneg_mu(mask, maskedoff, op1, vl); +vint64m8_t test_vneg_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vneg_mu(vm, vd, vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vneg\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsac.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsac.c index c2a9a8d03..d5bf8f211 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsac.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsac.c @@ -358,1060 +358,1060 @@ vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs return __riscv_vnmsac_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint8mf8_t 
test_vnmsac_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint8m4_t 
test_vnmsac_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return 
__riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, 
int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t 
test_vnmsac_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) 
{ + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, 
rs1, vs2, vl); +vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t 
test_vnmsac_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t vm, 
vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tum(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); 
+vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, 
size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } 
-vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, 
vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return 
__riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, 
vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return 
__riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return 
__riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_tumu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, 
vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return 
__riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + 
return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return 
__riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, 
vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t 
vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - 
return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t 
test_vnmsac_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_mu(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_mu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsac_mu(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsac_mu(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsub.c
index 56e55efd6..7bb4fc5dc 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vnmsub.c
@@ -358,1060 +358,1060 @@ vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs
   return __riscv_vnmsub_tu(vd, rs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tum(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
}
 
-vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, vs1, vs2, vl);
+vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, vs1, vs2, vl);
 }
 
-vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_tumu(mask, vd, rs1, vs2, vl);
+vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_tumu(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl);
+vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl);
+vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+ return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { + return 
__riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t 
vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t 
test_vnmsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, 
vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, vs1, vs2, vl); } 
-vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { - return __riscv_vnmsub_mu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { + return __riscv_vnmsub_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vnms[acub]+\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vnot.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vnot.c index 8a2c5f283..aaeb38870 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vnot.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vnot.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, 
size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint8m1_t 
test_vnot_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint32m8_t 
test_vnot_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_tu(maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_tu(vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, 
vl); +vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, 
vint64m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return 
__riscv_vnot_tum(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_tum(vm, vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return 
__riscv_vnot_tumu(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint32mf2_t 
test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint8m1_t 
test_vnot_v_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, 
vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_tumu(vm, vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint8m1_t test_vnot_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint8m2_t test_vnot_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_mu(vbool2_t 
mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint8m4_t test_vnot_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint8m8_t test_vnot_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint16m1_t test_vnot_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint16m2_t test_vnot_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint16m4_t test_vnot_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint16m8_t test_vnot_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint32m1_t test_vnot_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint32m2_t test_vnot_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint32m4_t test_vnot_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint32m8_t 
test_vnot_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint64m1_t test_vnot_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint64m2_t test_vnot_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint64m4_t test_vnot_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vint64m8_t test_vnot_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, 
vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint64m4_t 
test_vnot_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { - return __riscv_vnot_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_vnot_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { + return __riscv_vnot_mu(vm, vd, vs, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnot\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vnsra.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vnsra.c index 99ca6eec2..315c52f96 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vnsra.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vnsra.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, 
vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t maskedoff, 
vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t 
vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return 
__riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, 
vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t 
test_vnsra_wx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, 
maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t vm, 
vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t 
test_vnsra_wv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t 
vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t mask, 
vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsra_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsra_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsra\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vnsrl.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vnsrl.c index d55f34e9f..9f2a228a6 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vnsrl.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vnsrl.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); 
+vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vnsrl_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vnsrl_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t 
shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tu(maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vnsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vnsrl_mu(vm, vd, vs2, rs1, vl);
 }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vnsrl\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vor.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vor.c
index aa367af6b..8392961c7 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vor.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vor.c
@@ -6,1412 +6,1412 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vor_tu(maskedoff, op1, op2, vl);
+vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vor_tu(vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vor_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vor_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t
vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t 
test_vor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t 
op1, uint8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tum(vm, 
vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_tum(mask, 
maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } 
-vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t 
test_vor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, 
maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t 
test_vor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, 
vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t 
op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, 
vl); } -vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - 
return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t 
vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t 
op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); 
+vint8m8_t test_vor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return 
__riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vor_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, 
int32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t mask, 
vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t 
test_vor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t vm, 
vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, 
size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vor_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vor_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vor\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredand.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredand.c index cb0c80b33..325de86b6 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredand.c +++ 
b/auto-generated/policy_funcs/gnu-overloaded-tests/vredand.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); 
+vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - 
return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint16m1_t 
test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint64m1_t 
test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, 
maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } 
-vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t 
test_vredand_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, 
vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredand_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredand\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredmax.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredmax.c index 958de984d..985db89ab 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredmax.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vredmax.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint8m1_t 
test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint16m1_t 
test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, 
vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, 
scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t 
test_vredmax_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmax_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmax\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredmaxu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredmaxu.c index 30999b9cb..4826916a6 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredmaxu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vredmaxu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return 
__riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t 
scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, 
vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, 
vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredmaxu_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmaxu\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredmin.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredmin.c index e36f86ee9..1aed06675 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredmin.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vredmin.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return 
__riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return 
__riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t 
maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return 
__riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, 
maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredmin_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredmin\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredminu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredminu.c index f67637b44..8de5639ea 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredminu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vredminu.c @@ -6,180 +6,180 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t 
vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t 
test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t 
test_vredminu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return 
__riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredminu_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredminu\.[ivxfswum.]+\s+} 44 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredor.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredor.c index ab222b7e5..e39ac9cc0 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredor.c +++ 
b/auto-generated/policy_funcs/gnu-overloaded-tests/vredor.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint16m1_t 
test_vredor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, 
vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return 
__riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vuint64m1_t 
test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, 
vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t 
test_vredor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, 
vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, 
vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredor_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredor\.[ivxfswum.]+\s+} 88 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredsum.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredsum.c index df5a0105d..c15e508ff 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredsum.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vredsum.c @@ -6,356 +6,356 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint8m1_t 
test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } 
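
For reference, the operand order exercised by these _tu tests (vd, vs2, vs1, vl) mirrors the assembly form vredsum.vs vd, vs2, vs1: vd seeds the undisturbed tail of the result, vs2 supplies the vector source, and element 0 of vs1 provides the scalar accumulator. A minimal caller-side sketch, assuming a hypothetical helper sum_i32 that reduces an int32 buffer with the tail-undisturbed overload (illustrative only, not part of the generated tests):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: sum p[0..n) by accumulating into element 0 of vd. */
static int32_t sum_i32(const int32_t *p, size_t n) {
  vint32m1_t vd = __riscv_vmv_s_x_i32m1(0, __riscv_vsetvl_e32m1(1));
  for (size_t done = 0; done < n;) {
    size_t vl = __riscv_vsetvl_e32m8(n - done);
    vint32m8_t vs2 = __riscv_vle32_v_i32m8(p + done, vl);
    /* vd doubles as the vs1 operand so each strip adds onto the running
       total; the _tu policy keeps vd's tail elements undisturbed. */
    vd = __riscv_vredsum_tu(vd, vs2, vd, vl);
    done += vl;
  }
  return __riscv_vmv_x_s_i32m1_i32(vd);
}

Reusing vd as the vs1 operand is what makes the strip-mined loop accumulate; reseeding with a fresh zero each iteration would discard the earlier strips.
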
-vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, 
size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); 
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } 
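
In the masked _tum forms below, vm gates which elements of vs2 participate while vd's tail stays undisturbed, so the same accumulation idiom extends to conditional reductions. A hedged sketch, assuming a hypothetical helper sum_positive_i32 that folds in only the elements greater than zero (again illustrative, not generated test content):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: sum only the positive elements of p[0..n). */
static int32_t sum_positive_i32(const int32_t *p, size_t n) {
  vint32m1_t vd = __riscv_vmv_s_x_i32m1(0, __riscv_vsetvl_e32m1(1));
  for (size_t done = 0; done < n;) {
    size_t vl = __riscv_vsetvl_e32m8(n - done);
    vint32m8_t vs2 = __riscv_vle32_v_i32m8(p + done, vl);
    vbool4_t vm = __riscv_vmsgt_vx_i32m8_b4(vs2, 0, vl); /* select x > 0 */
    /* Operand order matches these tests: vm, vd, vs2, vs1, vl.
       Masked-off elements are simply left out of the sum. */
    vd = __riscv_vredsum_tum(vm, vd, vs2, vd, vl);
    done += vl;
  }
  return __riscv_vmv_x_s_i32m1_i32(vd);
}
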
-vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, 
vint16m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t 
scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredsum_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredsum_tum(vm, vd, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredsum\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vredxor.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vredxor.c
index 840d8db7f..508acbd58 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vredxor.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vredxor.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tu(maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return __riscv_vredxor_tum(mask, maskedoff, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vredxor_tum(vm, vd, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vredxor\.[ivxfswum.]+\s+} 88 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vrem.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vrem.c
index 2ec8bf8dd..06abca5c7 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vrem.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vrem.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return
__riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return 
__riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, 
vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t vm, vint8m1_t 
vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t 
vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return 
__riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vrem_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t mask, vint64m2_t 
maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vrem_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vrem_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrem_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrem_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vrem\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vremu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vremu.c
index 15e3d1c8f..259f452c2 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vremu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vremu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vremu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vremu_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vremu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vremu_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vremu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vremu_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vremu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vremu_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t
test_vremu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } 
-vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t vd, 
vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, 
vl); +vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, 
size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, 
vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } 
-vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, 
size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t 
test_vremu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); 
} -vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t 
mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t 
op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); 
+vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t 
test_vremu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, 
vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return 
__riscv_vremu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vremu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vremu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vrgather.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vrgather.c index 01fb3fd03..27750c2ae 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vrgather.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vrgather.c @@ -6,1892 +6,1892 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, 
size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) 
{ + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return 
__riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m8_t 
test_vrgather_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } 
-vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return 
__riscv_vrgather_tu(maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, 
size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tu(maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, 
size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tu(vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tum(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tum(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
-  return
__riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t 
index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, 
size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t mask, 
vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, 
vl); } -vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t 
vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m2_t 
test_vrgather_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_tumu(mask, maskedoff, op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, 
vuint16m1_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t 
test_vrgather_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { + return 
__riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { + 
return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, 
vuint16mf4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return 
__riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { + return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl); 
+vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
-  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) {
+  return __riscv_vrgather_mu(vm, vd, vs2, vs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgather\.[ivxfswum.]+\s+} 472 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vrgatherei16.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vrgatherei16.c
index 4bf3391ce..a3eba3a76 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vrgatherei16.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vrgatherei16.c
@@ -6,916 +6,916 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return
__riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint8mf4_t 
test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, 
vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t 
test_vrgatherei16_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { 
+ return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return 
__riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return 
__riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return 
__riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t 
vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t 
maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return 
__riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, 
vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, 
vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t 
test_vrgatherei16_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - 
return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t 
vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, 
vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, 
vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t 
test_vrgatherei16_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t 
mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vrgatherei16_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrgatherei16_mu(vm, vd, vs2, vs1, vl); }
/* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vrgatherei16\.[ivxfswum.]+\s+} 228 } } */
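Both this file and the vrsub.c file below apply the same operand rename, mirroring the operand names of the RVV assembly syntax: vm is the mask, vd the destination/merge operand, vs2 and vs1 the vector sources (the index vector for vrgatherei16), and rs1 the scalar source of the .vx forms. A minimal standalone caller sketch follows; the demo_* function names are illustrative and not part of the generated tests, while the __riscv_vrgatherei16_tumu and __riscv_vrsub_tu signatures are taken verbatim from the tests:

#include <riscv_vector.h>

/* Masked gather with 16-bit indices under the tumu policy: masked-off and
   tail elements keep their values from vd (the merge operand). */
vfloat32m1_t demo_gather(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2,
                         vuint16mf2_t vs1, size_t vl) {
  return __riscv_vrgatherei16_tumu(vm, vd, vs2, vs1, vl);
}

/* Reverse scalar subtract under the tu policy: element i of the result is
   rs1 - vs2[i] for i < vl; tail elements keep their values from vd. */
vint32m1_t demo_rsub(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
  return __riscv_vrsub_tu(vd, vs2, rs1, vl);
}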
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vrsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vrsub.c
index ce39c94d4..9ba581e14 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vrsub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vrsub.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1,
int16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return 
__riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, 
vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t vm, 
vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, 
uint8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, 
uint16_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t mask, 
vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t 
rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t 
op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, 
vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vrsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vrsub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, 
int16_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl); 
+vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vrsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vrsub_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vrsub\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsadd.c
index 433c9c36d..e61663a41 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsadd.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsadd.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vsadd_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vsadd_tu(vd, vs2, rs1, vl);
 }
-vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t maskedoff, 
vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, 
vint32m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, 
size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t 
test_vsadd_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t 
test_vsadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t 
test_vsadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t 
test_vsadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t 
mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t 
test_vsadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, 
rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return 
__riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } 
-vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t 
vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, 
op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t mask, 
vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsadd_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsadd\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsaddu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsaddu.c index d29a67267..f30ee863b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsaddu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsaddu.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8mf2_t 
test_vsaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return 
__riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); 
+vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t 
op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t vm, 
vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, 
size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); 
} -vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t mask, 
vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, 
op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, 
op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); 
+vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t 
test_vsaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } 
-vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t 
vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t 
test_vsaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return 
__riscv_vsaddu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsaddu_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsaddu_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsaddu_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsaddu\.[ivxfswum.]+\s+} 176 } } */
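The parameter renames throughout this patch follow a single convention: test arguments are named after the instruction operands rather than their role in the C API, so `vm` is the mask operand, `vd` the destination (which also supplies the undisturbed tail and masked-off values under the `_tu`/`_tum`/`_tumu`/`_mu` policies), `vs2`/`vs1` the vector sources, and `rs1` the scalar source, replacing the older `mask`/`maskedoff`/`op1`/`op2` names. A minimal caller sketch (the wrapper name is hypothetical and not part of this patch; the intrinsic call matches the u32m1 test above):

#include <riscv_vector.h>

/* Hypothetical wrapper, not part of the generated tests: masked saturating
   unsigned add with a tail-undisturbed policy. Lanes past vl keep their
   old value from vd; only lanes selected by vm are recomputed. */
vuint32m1_t saturating_accumulate(vbool32_t vm, vuint32m1_t vd,
                                  vuint32m1_t vs2, vuint32m1_t vs1,
                                  size_t vl) {
  return __riscv_vsaddu_tum(vm, vd, vs2, vs1, vl);
}

Because the overloaded `__riscv_vsaddu_tum` dispatches on argument types, this one call form covers every element width and LMUL the tests above enumerate.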
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsbc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsbc.c
index 419ca2748..a51b98937 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsbc.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsbc.c
@@ -6,356 +6,356 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl) {
-  return
__riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, 
size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t 
borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t vd, 
vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t 
op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl) { - 
return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl); } -vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { + return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl); } -vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl) { - return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl); +vuint32m8_t 
test_vsbc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
-vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
 }
-vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl) {
-  return __riscv_vsbc_tu(maskedoff, op1, op2, borrowin, vl);
+vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) {
+  return __riscv_vsbc_tu(vd, vs2, rs1, v0, vl);
 }
 /* { dg-final { scan-assembler-times {vsbc\.[ivxfswum.]+\s+} 88 } } */
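In the vsbc tests above, the renamed trailing argument `v0` (formerly `borrowin`) is a borrow-in, not a predication mask: vsbc is never predicated, and the per-lane borrow bit is architecturally fixed to the `v0` register, which the new name makes explicit. A short sketch under the same assumptions (the wrapper name is hypothetical; the call matches the u64m1 test above):

#include <riscv_vector.h>

/* Hypothetical wrapper, not part of the generated tests: per-lane
   subtract-with-borrow, result = vs2 - vs1 - v0, with tail lanes kept
   from vd under the _tu policy. v0 carries one borrow bit per lane,
   mirroring the fixed v0 register operand of vsbc.vvm. */
vuint64m1_t sub_with_borrow(vuint64m1_t vd, vuint64m1_t vs2,
                            vuint64m1_t vs1, vbool64_t v0, size_t vl) {
  return __riscv_vsbc_tu(vd, vs2, vs1, v0, vl);
}

A multi-limb subtraction would typically chain this with the borrow-out intrinsic (`__riscv_vmsbc`) to produce the `v0` input for the next limb.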
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf2.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tu(maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tu(vd, vs2, vl);
 }

-vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tum(vm, vd, vs2, vl);
 }

-vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_tumu(vm, vd, vs2, vl);
 }

-vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
-  return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vsext_vf2_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf2[ivxfswum.]*\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf4.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf4.c
index d1abf6945..94c72c4d4 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf4.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf4.c
@@ -6,148 +6,148 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tu(maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tu(vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tum(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_tumu(vm, vd, vs2, vl);
 }

-vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) {
-  return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vsext_vf4_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf4[ivxfswum.]*\s+} 36 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf8.c
index 7c130eeb1..d800f0cb1 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf8.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsext_vf8.c
@@ -6,68 +6,68 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tu(vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tu(vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tu(vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tu(maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tu(vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tum(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tum(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tum(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tum(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tumu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tumu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tumu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_tumu(vm, vd, vs2, vl);
 }

-vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
+vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_mu(vm, vd, vs2, vl);
 }

-vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
+vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_mu(vm, vd, vs2, vl);
 }

-vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
+vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_mu(vm, vd, vs2, vl);
 }

-vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) {
-  return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl);
+vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vsext_vf8_mu(vm, vd, vs2, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsext\.vf8[ivxfswum.]*\s+} 16 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1down.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1down.c
index 146f28c7c..e1d8238d1 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1down.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1down.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tu(maskedoff, src, value, vl);
+vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tu(vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_tum(mask, maskedoff, src, value, vl);
+vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_tum(vm, vd, vs2, rs1, vl);
 }

-vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
+vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
+vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
+vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
+vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
+vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl);
 }

-vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl);
+vint8m4_t
test_vslide1down_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, 
int32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, 
vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); 
+vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_tumu(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1down_tumu(vm, vd, vs2, rs1, vl); } 
-vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t 
test_vslide1down_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, 
rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + 
return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl); 
+vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
+vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
+vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
+vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
-  return __riscv_vslide1down_mu(mask, maskedoff, src, value, vl);
+vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vslide1down_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1down\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1up.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1up.c
index 6c30dbdb2..6e9688757 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1up.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vslide1up.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_tu(maskedoff, src, value, vl);
+vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_tu(maskedoff, src, value, vl);
+vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_tu(maskedoff, src, value, vl);
+vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
-  return __riscv_vslide1up_tu(maskedoff, src, value, vl);
+vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vslide1up_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
-  return
__riscv_vslide1up_tu(maskedoff, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { 
+ return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, 
uint8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return 
__riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tu(maskedoff, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t 
test_vslide1up_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t 
test_vslide1up_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t 
test_vslide1up_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, 
vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tum(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, 
int8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, 
size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, 
vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint16mf2_t 
test_vslide1up_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, 
uint64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_tumu(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return 
__riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, 
value, vl); +vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint8m2_t 
test_vslide1up_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, 
vl); +vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_mu(mask, maskedoff, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vslide1up_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslide1up\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vslidedown.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vslidedown.c index 233ccba28..fe477d117 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vslidedown.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vslidedown.c @@ -6,948 +6,948 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, 
vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint16mf2_t 
test_vslidedown_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, size_t offset, 
size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, 
vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint64m2_t 
test_vslidedown_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tu(maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, 
maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t 
mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t 
mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t 
test_vslidedown_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint64m2_t 
test_vslidedown_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tum(mask, maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t 
test_vslidedown_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint8mf4_t 
test_vslidedown_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, 
maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, 
size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } 
-vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint64m1_t 
test_vslidedown_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_tumu(mask, maskedoff, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, 
vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, 
rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t 
test_vslidedown_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t 
test_vslidedown_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); 
} -vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return 
 }
-vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslidedown_mu(mask, maskedoff, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslidedown_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslidedown\.[ivxfswum.]+\s+} 236 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vslideup.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vslideup.c
index 66e442841..89bff1858 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vslideup.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vslideup.c
@@ -6,948 +6,948 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_tu(dest, src, offset, vl);
+vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_tu(vd, vs2, rs1, vl);
 }
-vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_tu(dest, src, offset, vl);
+vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_tu(vd, vs2, rs1, vl);
 }
-vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_tu(dest, src, offset, vl);
+vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_tu(vd, vs2, rs1, vl);
 }
-vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_tu(dest, src, offset, vl);
+vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_tu(vd, vs2, rs1, vl);
 }
-vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_tu(dest, src, offset, vl);
+vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_tu(vd, vs2, rs1, vl);
 }
-vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_tu(dest, src, offset, vl);
+vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_tu(vd, vs2, rs1, vl);
 }
-vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint8mf2_t 
test_vslideup_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint32m1_t 
test_vslideup_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); 
+vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t dest, vuint32m4_t src, size_t 
offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tu(dest, src, offset, vl); +vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t 
mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t dest, vint8mf8_t 
src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t mask, vint64m8_t dest, 
vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint16m1_t 
test_vslideup_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) 
{ + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tum(mask, dest, src, offset, vl); +vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) 
{ + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, 
vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - 
return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint8mf4_t 
test_vslideup_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t 
rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_tumu(mask, dest, src, offset, vl); +vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_tumu(vm, 
vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t 
test_vslideup_vx_f32m8_mu(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); 
+vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t 
test_vslideup_vx_i32m4_mu(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint8m2_t 
test_vslideup_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslideup_mu(mask, dest, src, offset, vl); +vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vslideup_mu(vm, vd, vs2, 
rs1, vl);
 }

-vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
+vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
+vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl);
 }

-vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
+vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
+vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
+vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
+vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl);
 }

-vuint64m8_t test_vslideup_vx_u64m8_mu(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return __riscv_vslideup_mu(mask, dest, src, offset, vl);
+vuint64m8_t test_vslideup_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vslideup_mu(vm, vd, vs2, rs1, vl);
 }

 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vslideup\.[ivxfswum.]+\s+} 236 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsll.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsll.c
index a1541407a..2a6cd1213 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsll.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsll.c
@@ -6,1412 +6,1412 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;

-vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsll_tu(maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsll_tu(vd, vs2, vs1, vl);
 }

-vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsll_tu(maskedoff, op1, shift, vl);
+vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsll_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t
test_vsll_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return 
__riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return 
__riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return 
__riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return 
__riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - 
return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t maskedoff, 
vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vuint64m8_t 
test_vsll_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tu(maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return 
__riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, 
op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } 
-vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m4_t 
test_vsll_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } 
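/* [Editorial note, not part of the generated patch] The change in these hunks
 * is a mechanical rename of the test-function parameters to match the operand
 * names of the underlying instruction: mask -> vm, maskedoff -> vd, op1 -> vs2,
 * and the shift amount -> vs1 for the vector-vector (vv) form or rs1 for the
 * vector-scalar (vx) form. A minimal usage sketch under the new names
 * (hypothetical caller, assuming <riscv_vector.h>):
 *
 *   #include <riscv_vector.h>
 *
 *   // Shift each active element of vs2 left by rs1 bits; with the _tum
 *   // (tail-undisturbed, masked) policy, tail elements keep the values
 *   // already present in vd.
 *   vint32m1_t shift_active(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
 *                           size_t rs1, size_t vl) {
 *     return __riscv_vsll_tum(vm, vd, vs2, rs1, vl);
 *   }
 */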
-vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, 
shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t 
vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, 
vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tum(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t 
test_vsll_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t 
maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint16m8_t 
test_vsll_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, 
rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { 
- return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, 
size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } 
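/* [Editorial note, not part of the generated patch] The _tumu variants above
 * follow the same rename. Sketch of the policy difference (hypothetical
 * caller, assuming <riscv_vector.h>): with _tumu, both the tail elements and
 * the masked-off (inactive) elements keep the values already in vd.
 *
 *   vuint16m1_t keep_inactive(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2,
 *                             vuint16m1_t vs1, size_t vl) {
 *     // vv form: per-element shift amounts come from vs1
 *     return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl);
 *   }
 */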
-vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, 
vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, shift, vl); +vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_tumu(mask, maskedoff, op1, 
shift, vl); +vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t 
shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, 
vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m2_t 
test_vsll_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, 
vuint64m8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + 
return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return 
__riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsll_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - 
return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+ return __riscv_vsll_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
- return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+ return __riscv_vsll_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+ return __riscv_vsll_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return __riscv_vsll_mu(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+ return __riscv_vsll_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsll\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsmul.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsmul.c
index 549a3a180..7fa6ae2a2 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsmul.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsmul.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vsmul_tu(maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, 
op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t 
maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tu(maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t 
maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return 
__riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return 
__riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return 
__riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tum(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return 
__riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, 
vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t 
maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_tumu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t 
vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, 
__RISCV_VXRM_RNU, vl); +vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t 
test_vsmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t 
vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t 
vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_mu(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsmul_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsmul\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsra.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsra.c index 102d28980..cf59383ce 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsra.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsra.c @@ -6,708 +6,708 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8mf2_t 
test_vsra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t 
vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vsra_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } 
-vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tu(maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return 
__riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, 
op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } 
-vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m4_t 
test_vsra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tum(mask, maskedoff, op1, shift, vl); +vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t 
test_vsra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16mf2_t 
test_vsra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, 
rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl); +vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return 
+vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
+vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsra_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_tumu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsra_mu(mask, maskedoff, op1, shift, vl);
+vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsra_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsra\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsrl.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsrl.c
index c8be58d83..56ff737c7 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsrl.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsrl.c
@@ -6,708 +6,708 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tu(maskedoff, op1, shift, vl);
+vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tum(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsrl_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_tumu(mask, maskedoff, op1, shift, vl);
test_vsrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t 
maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t 
test_vsrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); +vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl); 
+vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsrl_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vsrl_mu(mask, maskedoff, op1, shift, vl);
+vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vsrl_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vsrl\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vssra.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vssra.c
index 7b4409e46..84e800229 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vssra.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vssra.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
-vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
-vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint8m1_t
test_vssra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t 
test_vssra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t 
test_vssra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, 
vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); 
+vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, 
vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return 
__riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t mask, 
vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, 
__RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t 
mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t 
vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - 
return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return 
__riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vssra_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssra_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssra_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssra\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vssrl.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vssrl.c
index f51c4c8f4..984fcdcbc 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vssrl.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vssrl.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl);
 }
 
-vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
-  return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl);
+vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1,
size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, 
shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + 
return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, size_t 
shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tu(maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); 
+vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, 
vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, 
shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, 
vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tum(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return 
__riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, 
shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t 
test_vssrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, 
vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_tumu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t 
test_vssrl_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf4_t 
test_vssrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, 
vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, 
vuint32m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssrl_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return __riscv_vssrl_mu(mask, maskedoff, op1, shift, __RISCV_VXRM_RNU, vl); +vuint64m8_t 
test_vssrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vssrl_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssrl\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vssub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vssub.c
index 5e87ba175..a591796d5 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vssub.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vssub.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vssub_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vssub_tu(vd, vs2, rs1, vl);
 }
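The rename applied throughout these hunks follows the RVV assembly operand names: mask becomes vm (the v0.t mask operand), maskedoff becomes vd (the destination whose previous values survive under the undisturbed policies), op1 becomes vs2, and the second source becomes vs1 (vector) or rs1 (scalar), mirroring vssub.vv vd, vs2, vs1, v0.t. The caller-side sketch below shows the convention for the tumu variant; it is illustrative only, not part of the generated tests, assumes a toolchain with RVV intrinsics support (for example, GCC targeting -march=rv64gcv), and the function name and nonzero-subtrahend mask are hypothetical.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical driver, not from this patch: out[i] = sat(a[i] - b[i]) where
   b[i] != 0; elements with b[i] == 0 keep their previous out[i] value because
   tumu = tail undisturbed, mask undisturbed. */
void ssub_nonzero_i8(const int8_t *a, const int8_t *b, int8_t *out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vint8m1_t vs2 = __riscv_vle8_v_i8m1(a + i, vl);       /* minuend    -> vs2 */
    vint8m1_t vs1 = __riscv_vle8_v_i8m1(b + i, vl);       /* subtrahend -> vs1 */
    vint8m1_t vd  = __riscv_vle8_v_i8m1(out + i, vl);     /* old dest   -> vd  */
    vbool8_t  vm  = __riscv_vmsne_vx_i8m1_b8(vs1, 0, vl); /* mask       -> vm  */
    vd = __riscv_vssub_tumu(vm, vd, vs2, vs1, vl);        /* vssub.vv with v0.t */
    __riscv_vse8_v_i8m1(out + i, vd, vl);
    i += vl;
  }
}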
-vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint32m8_t 
test_vssub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return 
__riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t 
vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t mask, 
vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t vm, 
vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, 
vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t 
vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t 
op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t 
test_vssub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } 
-vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t 
vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, 
op2, vl); +vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t mask, 
vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vssub_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) 
{
+  return __riscv_vssub_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_mu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vssub_mu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vssub_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vssub_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssub\.[ivxfswum.]+\s+} 176 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vssubu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vssubu.c
index 0b921ada5..27a37c6fe 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vssubu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vssubu.c
@@ -6,708 +6,708 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vssubu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vssubu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vssubu_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vssubu_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vssubu_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vssubu_tu(maskedoff,
op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, 
vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + 
return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, 
op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t 
test_vssubu_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, 
size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, 
op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t vm, 
vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, 
size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t 
test_vssubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t 
test_vssubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t 
test_vssubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t 
mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t 
vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, 
vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, 
vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); 
+vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vssubu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vssubu\.[ivxfswum.]+\s+} 176 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vsub.c index db78ff2d8..0d18b3175 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vsub.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vsub.c @@ -6,1412 +6,1412 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, 
size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint16m2_t 
test_vsub_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t 
op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); 
+vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, 
uint8_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint16m8_t 
test_vsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, 
op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + 
return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, 
int16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return 
__riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } 
-vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf4_t 
test_vsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, 
vuint16mf4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t 
test_vsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, 
vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return 
__riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t mask, 
vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t vm, 
vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t 
maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, 
int64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, 
vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); 
+vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t 
vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t 
test_vsub_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsub_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vsub_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, 
size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vsub_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, 
vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vsub_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vsub_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vsub\.[ivxfswum.]+\s+} 352 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwadd.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwadd.c
index 470f3514b..ab9eeddd5 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwadd.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwadd.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tu(vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tu(vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tu(vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
}
 
-vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t
vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return 
__riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t 
test_vwadd_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_vv_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_wv_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_wv_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t 
vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return 
__riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - 
return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, 
size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t 
op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwadd_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwadd_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwadd_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwadd_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t 
vs1, size_t vl) {
+  return __riscv_vwadd_wv_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwadd_wx_mu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwadd_wx_mu(vm, vd, vs2, rs1, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwaddu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwaddu.c
index 7f9fbec8d..a5c36dd8e 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwaddu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwaddu.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t maskedoff,
vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t vd, 
vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } 
-vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return 
__riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t 
rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, 
op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, 
op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t 
test_vwaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, 
maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, 
op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, 
op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return 
__riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, 
vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, 
vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t 
test_vwaddu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, 
vl); } -vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + 
return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, 
uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t 
test_vwaddu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t 
test_vwaddu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t 
maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, 
vuint32m2_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vwaddu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vwaddu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vwaddu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vwaddu_wx_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?add[u]?\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvt.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvt.c
index 121a2d0cd..d257ac929 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvt.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvt.c
@@ -6,244 +6,244 @@
 typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint16m1_t test_vwcvt_x_x_v_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint16m2_t test_vwcvt_x_x_v_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint16m4_t test_vwcvt_x_x_v_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint32m2_t test_vwcvt_x_x_v_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint32m4_t test_vwcvt_x_x_v_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint64m2_t test_vwcvt_x_x_v_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint64m4_t test_vwcvt_x_x_v_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tu(maskedoff, src, vl);
+vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tu(vd, vs2, vl);
 }
-vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
-  return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl);
+vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwcvt_x_tum(vm, vd, vs2, vl);
 }
-vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
-
return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_tum(vm, vd, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vwcvt_x_tum(mask, maskedoff, src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, 
vint16m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vwcvt_x_tumu(mask, maskedoff, src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t 
src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return __riscv_vwcvt_x_mu(mask, maskedoff, src, vl); +vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { + return __riscv_vwcvt_x_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvt\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvtu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvtu.c index 6eb4f54fb..a2b9ee5a5 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvtu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwcvtu.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t 
src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t 
maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tu(maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tu(vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, 
maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tum(mask, maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); 
+vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x_tumu(mask, maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint16m1_t 
test_vwcvtu_x_x_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return 
__riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return __riscv_vwcvtu_x_mu(mask, maskedoff, src, vl); +vuint64m8_t test_vwcvtu_x_x_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vwcvtu_x_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwcvtu\.[ivxfswum.]+\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmacc.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmacc.c index 918393134..8d8303f89 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmacc.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmacc.c @@ -126,364 +126,364 @@ vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, s return __riscv_vwmacc_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, 
rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, 
size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t vm, 
vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return 
__riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t 
test_vwmacc_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, 
size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { + 
return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t 
vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmacc_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmacc_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmacc\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccsu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccsu.c index 517bf50b6..b8251b6a3 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccsu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccsu.c @@ -126,364 +126,364 @@ vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2 return __riscv_vwmaccsu_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, 
vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - 
return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return 
__riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, 
size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, 
vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t mask, 
vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t vm, 
vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return 
__riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t 
test_vwmaccsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, vs1, vs2, vl); +vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccsu_mu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccsu_mu(vm, vd, rs1, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccsu\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccu.c index 4dc2948b2..8696cb243 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccu.c @@ -126,364 +126,364 @@ vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t v return __riscv_vwmaccu_tu(vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, 
vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, 
vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t 
vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tum(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t 
test_vwmaccu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return 
__riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint32_t 
rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_tumu(mask, vd, rs1, vs2, vl); +vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t 
vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { + return 
__riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, vl); +vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, vs1, vs2, vl); +vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { + return __riscv_vwmaccu_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { - return __riscv_vwmaccu_mu(mask, vd, rs1, vs2, 
vl);
+vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vwmaccu_mu(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccu\.[ivxfswum.]+\s+} 120 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccus.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccus.c
index 5b418b4b6..5468c413c 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccus.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmaccus.c
@@ -66,184 +66,184 @@ vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2
   return __riscv_vwmaccus_tu(vd, rs1, vs2, vl);
 }
 
-vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl);
+vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl);
 }
 
-vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t
vl) { - return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return __riscv_vwmaccus_tum(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmaccus_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { + return 
__riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { - return __riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { - return 
__riscv_vwmaccus_tumu(mask, vd, rs1, vs2, vl); +vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { + return __riscv_vwmaccus_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { - return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl); +vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { + return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t 
test_vwmaccus_vx_i32m8_mu(vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
-  return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
+vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
+  return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
-  return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
+vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
-  return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
+vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
+  return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
-  return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
+vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
+  return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
-  return __riscv_vwmaccus_mu(mask, vd, rs1, vs2, vl);
+vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
+  return __riscv_vwmaccus_mu(vm, vd, rs1, vs2, vl);
 }
 
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmaccus\.[ivxfswum.]+\s+} 60 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmul.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmul.c
index ee1ee4b41..9be4e6f07 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmul.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmul.c
@@ -6,484 +6,484 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
 
-vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwmul_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_tu(vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwmul_tu(vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwmul_tu(vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwmul_tu(maskedoff, op1, op2, vl);
+vint16m1_t
test_vwmul_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t vd, 
vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, 
size_t vl) { + return __riscv_vwmul_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, 
vl); +vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t 
test_vwmul_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t 
test_vwmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } 
-vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return 
__riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, 
size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); 
+vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t 
mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwmul_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwmul_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmul\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulsu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulsu.c index 4105e341d..b1e058d53 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulsu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulsu.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - 
return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint32m1_t 
test_vwmulsu_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t vd, vint32m1_t 
vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tu(maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, 
maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, 
vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, 
vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tum(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t 
test_vwmulsu_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t mask, 
vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t 
op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_tumu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t 
test_vwmulsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, 
vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulsu_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulsu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulsu\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulu.c index 5f9804538..556bb1e2b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwmulu.c @@ -6,484 +6,484 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) 
{ + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + 
return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t 
test_vwmulu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t 
maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t 
vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, 
maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, 
op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_tumu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); 
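The renames in these tests follow one convention: vm is the mask (formerly mask), vd the destination/passthrough operand (formerly maskedoff), vs2 the vector source (formerly op1), and vs1 or rs1 the second vector or scalar source (formerly op2). As a usage sketch only, not part of the generated tests, with hypothetical function and buffer names, assuming a toolchain with the RVV intrinsics (e.g. -march=rv64gcv):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Write the widening u8*u8 -> u16 product only where sel[i] != 0; the
   tumu policy keeps inactive and tail elements of acc unchanged. */
void widen_mul_masked(uint16_t *acc, const uint8_t *a, uint8_t scale,
                      const uint8_t *sel, size_t n) {
  for (size_t vl; n > 0; n -= vl, a += vl, sel += vl, acc += vl) {
    vl = __riscv_vsetvl_e8m1(n);
    vuint8m1_t vs2 = __riscv_vle8_v_u8m1(a, vl);
    vbool8_t vm = __riscv_vmsne_vx_u8m1_b8(__riscv_vle8_v_u8m1(sel, vl), 0, vl);
    vuint16m2_t vd = __riscv_vle16_v_u16m2(acc, vl);
    /* Same operand order as the tests above: vm, vd, vs2, rs1, vl. */
    vd = __riscv_vwmulu_tumu(vm, vd, vs2, scale, vl);
    __riscv_vse16_v_u16m2(acc, vd, vl);
  }
}
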
+vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return 
__riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t 
test_vwmulu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwmulu_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwmulu_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwmulu\.[ivxfswum.]+\s+} 120 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsum.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsum.c index 00ab62f0e..2e21eb7d5 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsum.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsum.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t vd, vint8mf8_t vs2, 
vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t vd, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t vd, vint8m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t vd, vint8m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t vd, vint8m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t vd, vint8m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t vd, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t vd, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t vd, vint16m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, 
vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t vd, vint16m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t vd, vint16m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t vd, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t vd, vint32m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t vd, vint32m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tu(maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t vd, vint32m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint8mf8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t vm, 
vint16m1_t vd, vint8m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint8m2_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint8m4_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t vm, vint16m1_t vd, vint8m8_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint16m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint16m4_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t vm, vint32m1_t vd, vint16m8_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t mask, vint64m1_t 
maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint32m2_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint32m4_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_tum(mask, maskedoff, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t vm, vint64m1_t vd, vint32m8_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vwredsum_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsum\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsumu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsumu.c index 05f97c63c..1a469604f 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsumu.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwredsumu.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t vd, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t vd, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return 
__riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t vd, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t vd, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t vd, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t vd, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t vd, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t vd, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t vd, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t vd, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t vd, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t vd, 
vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t vd, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t vd, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t vd, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tu(maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t vd, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return 
__riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t vm, vuint16m1_t vd, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t vm, vuint32m1_t vd, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); 
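For the widening reductions the same scheme reads: vd (formerly maskedoff) is the destination whose tail the policy governs, vs2 (formerly vector) is the vector being reduced, and vs1 (formerly scalar) is the scalar operand added into element 0 of the result. A strip-mined masked sum as an illustrative sketch, with hypothetical names; vwredsum works the same way for signed elements:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Sum u8 elements where sel[i] != 0 into a u16 scalar, carrying the
   running total across strips by passing acc as both vd and vs1. */
uint16_t sum_u8_masked(const uint8_t *a, const uint8_t *sel, size_t n) {
  vuint16m1_t acc = __riscv_vmv_s_x_u16m1(0, __riscv_vsetvl_e16m1(1));
  for (size_t vl; n > 0; n -= vl, a += vl, sel += vl) {
    vl = __riscv_vsetvl_e8m1(n);
    vuint8m1_t vs2 = __riscv_vle8_v_u8m1(a, vl);
    vbool8_t vm = __riscv_vmsne_vx_u8m1_b8(__riscv_vle8_v_u8m1(sel, vl), 0, vl);
    acc = __riscv_vwredsumu_tum(vm, acc, vs2, acc, vl);
  }
  return __riscv_vmv_x_s_u16m1_u16(acc);
}
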
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_tum(mask, maskedoff, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t vm, vuint64m1_t vd, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vwredsumu_tum(vm, vd, vs2, vs1, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vwredsumu\.[ivxfswum.]+\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwsub.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwsub.c index a713924f4..5086a4b64 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwsub.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwsub.c @@ -6,964 +6,964 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl); +vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl); +vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { + return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return __riscv_vwsub_vx_tu(maskedoff, 
op1, op2, vl);
+vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_vx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vwsub_wx_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t
vs1, size_t vl) { + return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { + return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, 
int32_t op2, size_t vl) { - return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_vv_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_vv_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_vx_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vwsub_vx_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vwsub_wv_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vwsub_wv_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return __riscv_vwsub_wx_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, 
int32_t rs1, size_t vl) {
+ return __riscv_vwsub_wx_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vwsubu.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vwsubu.c
index 722b65273..66eab1a61 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vwsubu.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vwsubu.c
@@ -6,964 +6,964 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1,
uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t 
vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint32m2_t 
test_vwsubu_vv_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, 
vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, 
size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) 
{ - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return 
__riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t 
op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) 
{ - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { 
- return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_vx_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vwsubu_wv_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vwsubu_wx_tum(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, 
vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return __riscv_vwsubu_vv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_vv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_vx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_vx_mu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return __riscv_vwsubu_wv_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) {
+ return __riscv_vwsubu_wv_mu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return __riscv_vwsubu_wx_mu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) {
+ return __riscv_vwsubu_wx_mu(vm, vd, vs2, rs1, vl);
 }
 /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+v[w]?sub[u]?\.[ivxfswum.]+\s+} 240 } } */
diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vxor.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vxor.c
index 83e6296fb..4133c95bd 100644
--- a/auto-generated/policy_funcs/gnu-overloaded-tests/vxor.c
+++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vxor.c
@@ -6,1412 +6,1412 @@ typedef _Float16 float16_t;
 typedef float float32_t;
 typedef double float64_t;
-vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vxor_tu(vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return __riscv_vxor_tu(maskedoff, op1, op2, vl);
+vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return 
__riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_tu(maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl); +vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { + return __riscv_vxor_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t mask, vint8mf8_t 
-vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tum(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tum(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, vs1, vl);
 }
-vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return __riscv_vxor_tumu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vxor_tumu(vm, vd, vs2, rs1, vl);
 }
-vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vxor_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vxor_mu(vm, vd, vs2, vs1, vl);
 }
-vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+  return __riscv_vxor_mu(vm, vd, vs2, rs1, vl);
 }
-vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vxor_mu(vm, vd, vs2, vs1, vl);
 }
test_vxor_vv_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t 
test_vxor_vx_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, 
size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t 
test_vxor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t 
op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t 
mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t 
test_vxor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t 
test_vxor_vx_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vxor_mu(mask, maskedoff, op1, op2, vl); +vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vxor_mu(vm, vd, vs2, rs1, vl); } /* { dg-final { scan-assembler-times {vxor\.[ivxfswum.]+\s+} 352 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf2.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf2.c index 1d2636ab1..cc7b3f8f7 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf2.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf2.c @@ -6,244 +6,244 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t maskedoff, 
vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { 
- return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tu(maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tu(vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + 
return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint32mf2_t 
test_vzext_vf2_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t mask, vuint16m1_t 
maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t vm, 
vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl) { - return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vzext_vf2_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf2[ivxfswum.]*\s+} 60 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf4.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf4.c index 6fbdfe423..edc2ca64b 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf4.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf4.c @@ -6,148 +6,148 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tu(vd, vs2, vl); } -vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, 
maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t vm, vuint32m2_t 
vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint64m1_t 
test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vzext_vf4_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf4[ivxfswum.]*\s+} 36 } } */ diff --git a/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf8.c b/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf8.c index 9e3ac0680..dc3663b9f 100644 --- a/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf8.c +++ b/auto-generated/policy_funcs/gnu-overloaded-tests/vzext_vf8.c @@ -6,68 +6,68 @@ typedef _Float16 float16_t; typedef float float32_t; typedef double float64_t; -vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8_tu(vd, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8_tu(vd, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8_tu(vd, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8_tu(vd, vs2, vl); } -vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, 
vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vzext_vf8_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vzext_vf8_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vzext_vf8_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vzext_vf8_mu(vm, vd, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vzext\.vf8[ivxfswum.]*\s+} 16 } } */
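For readers unfamiliar with the overloaded policy intrinsics exercised by these tests, a minimal usage sketch follows. It is illustrative only: the function name xor_even_lanes and the even-lane mask are hypothetical, not taken from the generated tests, and it assumes a toolchain providing the v1.0 RVV intrinsics via <riscv_vector.h>. The call shape matches test_vxor_vx_u32m1_mu above: under the _mu (mask-undisturbed) policy, elements whose mask bit is clear keep their value from vd, while active elements receive vs2 ^ rs1.

/*
 * Illustrative sketch only -- not part of the auto-generated tests.
 * Demonstrates the overloaded mask-undisturbed intrinsic __riscv_vxor_mu
 * with the (vm, vd, vs2, rs1, vl) operand order used throughout this patch.
 */
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void xor_even_lanes(uint32_t *dst, size_t n, uint32_t key) {
  for (size_t i = 0; i < n; ) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    /* vd doubles as passthrough for masked-off lanes and as the vs2 source. */
    vuint32m1_t vd = __riscv_vle32_v_u32m1(dst + i, vl);
    /* Build a mask selecting even lane indices. */
    vuint32m1_t idx = __riscv_vid_v_u32m1(vl);
    vbool32_t vm = __riscv_vmseq_vx_u32m1_b32(
        __riscv_vand_vx_u32m1(idx, 1, vl), 0, vl);
    /* Even lanes become dst[i] ^ key; odd lanes are left undisturbed. */
    vuint32m1_t vr = __riscv_vxor_mu(vm, vd, vd, key, vl);
    __riscv_vse32_v_u32m1(dst + i, vr, vl);
    i += vl;
  }
}

As with all calls in the overloaded tests above, __riscv_vxor_mu resolves to a type-specific intrinsic (here the u32m1 vector-scalar form) from its argument types, so no _vx_u32m1 suffix appears at the call site.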